/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1984, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/ksynch.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/filio.h>
#include <sys/policy.h>

#include <sys/fs/ufs_fs.h>
#include <sys/fs/ufs_lockfs.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_fsdir.h>
#include <sys/fs/ufs_quota.h>
#include <sys/fs/ufs_log.h>
#include <sys/fs/ufs_snap.h>
#include <sys/fs/ufs_trans.h>
#include <sys/fs/ufs_panic.h>
#include <sys/fs/ufs_bio.h>
#include <sys/dirent.h>		/* must be AFTER <sys/fs/fsdir.h>! */
#include <sys/errno.h>
#include <sys/fssnap_if.h>
#include <sys/unistd.h>
#include <sys/sunddi.h>

#include <sys/filio.h>		/* _FIOIO */

#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <vm/seg_kmem.h>

#include <sys/fs_subr.h>

#include <sys/fs/decomp.h>
static struct instats ins;

static int ufs_getpage_ra(struct vnode *, uoff_t, struct seg *, caddr_t);
static int ufs_getpage_miss(struct vnode *, uoff_t, size_t, struct seg *,
	caddr_t, struct page **, size_t, enum seg_rw, int);
static int ufs_open(struct vnode **, int, struct cred *, caller_context_t *);
static int ufs_close(struct vnode *, int, int, offset_t, struct cred *,
	caller_context_t *);
static int ufs_read(struct vnode *, struct uio *, int, struct cred *,
	struct caller_context *);
static int ufs_write(struct vnode *, struct uio *, int, struct cred *,
	struct caller_context *);
static int ufs_ioctl(struct vnode *, int, intptr_t, int, struct cred *,
	int *, caller_context_t *);
static int ufs_getattr(struct vnode *, struct vattr *, int, struct cred *,
	caller_context_t *);
static int ufs_setattr(struct vnode *, struct vattr *, int, struct cred *,
	caller_context_t *);
static int ufs_access(struct vnode *, int, int, struct cred *,
	caller_context_t *);
static int ufs_lookup(struct vnode *, char *, struct vnode **,
	struct pathname *, int, struct vnode *, struct cred *,
	caller_context_t *, int *, pathname_t *);
static int ufs_create(struct vnode *, char *, struct vattr *, enum vcexcl,
	int, struct vnode **, struct cred *, int,
	caller_context_t *, vsecattr_t *);
static int ufs_remove(struct vnode *, char *, struct cred *,
	caller_context_t *, int);
static int ufs_link(struct vnode *, struct vnode *, char *, struct cred *,
	caller_context_t *, int);
static int ufs_rename(struct vnode *, char *, struct vnode *, char *,
	struct cred *, caller_context_t *, int);
static int ufs_mkdir(struct vnode *, char *, struct vattr *, struct vnode **,
	struct cred *, caller_context_t *, int, vsecattr_t *);
static int ufs_rmdir(struct vnode *, char *, struct vnode *, struct cred *,
	caller_context_t *, int);
static int ufs_readdir(struct vnode *, struct uio *, struct cred *, int *,
	caller_context_t *, int);
static int ufs_symlink(struct vnode *, char *, struct vattr *, char *,
	struct cred *, caller_context_t *, int);
static int ufs_readlink(struct vnode *, struct uio *, struct cred *,
	caller_context_t *);
static int ufs_fsync(struct vnode *, int, struct cred *, caller_context_t *);
static void ufs_inactive(struct vnode *, struct cred *, caller_context_t *);
static int ufs_fid(struct vnode *, struct fid *, caller_context_t *);
static int ufs_rwlock(struct vnode *, int, caller_context_t *);
static void ufs_rwunlock(struct vnode *, int, caller_context_t *);
static int ufs_seek(struct vnode *, offset_t, offset_t *, caller_context_t *);
static int ufs_frlock(struct vnode *, int, struct flock64 *, int, offset_t,
	struct flk_callback *, struct cred *,
	caller_context_t *);
static int ufs_space(struct vnode *, int, struct flock64 *, int, offset_t,
	cred_t *, caller_context_t *);
static int ufs_getpage(struct vnode *, offset_t, size_t, uint_t *,
	struct page **, size_t, struct seg *, caddr_t,
	enum seg_rw, struct cred *, caller_context_t *);
static int ufs_putpage(struct vnode *, offset_t, size_t, int, struct cred *,
	caller_context_t *);
static int ufs_putpages(struct vnode *, offset_t, size_t, int, struct cred *);
static int ufs_map(struct vnode *, offset_t, struct as *, caddr_t *, size_t,
	uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static int ufs_addmap(struct vnode *, offset_t, struct as *, caddr_t, size_t,
	uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static int ufs_delmap(struct vnode *, offset_t, struct as *, caddr_t, size_t,
	uint_t, uint_t, uint_t, struct cred *, caller_context_t *);
static int ufs_poll(vnode_t *, short, int, short *, struct pollhead **,
	caller_context_t *);
static int ufs_dump(vnode_t *, caddr_t, offset_t, offset_t,
	caller_context_t *);
static int ufs_l_pathconf(struct vnode *, int, ulong_t *, struct cred *,
	caller_context_t *);
static int ufs_pageio(struct vnode *, struct page *, uoff_t, size_t, int,
	struct cred *, caller_context_t *);
static int ufs_dumpctl(vnode_t *, int, offset_t *, caller_context_t *);
static daddr32_t *save_dblks(struct inode *, struct ufsvfs *, daddr32_t *,
	daddr32_t *, int, int);
static int ufs_getsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
	caller_context_t *);
static int ufs_setsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
	caller_context_t *);
static int ufs_priv_access(void *, int, struct cred *);
static int ufs_eventlookup(struct vnode *, char *, struct cred *,
	struct vnode **);
/*
 * For lockfs: ulockfs begin/end is now inlined in the ufs_xxx functions.
 *
 * XXX - ULOCKFS in fs_pathconf and ufs_ioctl is not inlined yet.
 *
 * NOTE: "not blkd" below means that the operation isn't blocked by lockfs
 */
const struct vnodeops ufs_vnodeops = {
	.vop_open = ufs_open,			/* not blkd */
	.vop_close = ufs_close,			/* not blkd */
	.vop_read = ufs_read,
	.vop_write = ufs_write,
	.vop_ioctl = ufs_ioctl,
	.vop_getattr = ufs_getattr,
	.vop_setattr = ufs_setattr,
	.vop_access = ufs_access,
	.vop_lookup = ufs_lookup,
	.vop_create = ufs_create,
	.vop_remove = ufs_remove,
	.vop_link = ufs_link,
	.vop_rename = ufs_rename,
	.vop_mkdir = ufs_mkdir,
	.vop_rmdir = ufs_rmdir,
	.vop_readdir = ufs_readdir,
	.vop_symlink = ufs_symlink,
	.vop_readlink = ufs_readlink,
	.vop_fsync = ufs_fsync,
	.vop_inactive = ufs_inactive,		/* not blkd */
	.vop_rwlock = ufs_rwlock,		/* not blkd */
	.vop_rwunlock = ufs_rwunlock,		/* not blkd */
	.vop_seek = ufs_seek,
	.vop_frlock = ufs_frlock,
	.vop_space = ufs_space,
	.vop_getpage = ufs_getpage,
	.vop_putpage = ufs_putpage,
	.vop_addmap = ufs_addmap,		/* not blkd */
	.vop_delmap = ufs_delmap,		/* not blkd */
	.vop_poll = ufs_poll,			/* not blkd */
	.vop_dump = ufs_dump,
	.vop_pathconf = ufs_l_pathconf,
	.vop_pageio = ufs_pageio,
	.vop_dumpctl = ufs_dumpctl,
	.vop_getsecattr = ufs_getsecattr,
	.vop_setsecattr = ufs_setsecattr,
	.vop_vnevent = fs_vnevent_support,
};
#define	MAX_BACKFILE_COUNT	9999

/*
 * Created by ufs_dumpctl() to store a file's disk block info into memory.
 * Used by ufs_dump() to dump data to disk directly.
 */
struct dump {
	struct inode	*ip;		/* the file we contain */
	daddr_t		fsbs;		/* number of blocks stored */
	struct timeval32 time;		/* time stamp for the struct */
	daddr32_t	dblk[1];	/* place holder for block info */
};

static struct dump *dump_info = NULL;
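
/*
 * Illustrative note: dblk[1] is the old C89 "flexible array member" idiom;
 * the actual allocation is sized past the end of the struct, e.g. a
 * hypothetical sketch (nblocks is not a name used in this file):
 *	kmem_alloc(sizeof (struct dump) +
 *	    (nblocks - 1) * sizeof (daddr32_t), KM_SLEEP);
 * The same sizing pattern appears for fiosnapcreate_multi in ufs_ioctl().
 */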
/*
 * Previously there was no special action required for ordinary files.
 * (Devices are handled through the device file system.)
 * Now we support large files, and the large file API requires open to
 * fail if the file is large.
 * We could take care to prevent data corruption by doing an atomic
 * check of the size and truncating if the file is opened with the
 * FTRUNC flag set, but traditionally this is done by the vfs/vnode
 * layers. So taking care of truncation here would change the existing
 * semantics of fop_open and therefore we chose not to implement anything
 * here. The check for a file size > 2GB is done at the vfs layer in
 * routine vn_open().
 */
static int
ufs_open(struct vnode **vpp, int flag, struct cred *cr, caller_context_t *ct)
{
	return (0);
}
static int
ufs_close(struct vnode *vp, int flag, int count, offset_t offset,
	struct cred *cr, caller_context_t *ct)
{
	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);

	/*
	 * Push partially filled cluster at last close.
	 * ``last close'' is approximated because the dnlc
	 * may have a hold on the vnode.
	 * Checking for VBAD here will also act as a forced umount check.
	 */
	if (vp->v_count <= 2 && vp->v_type != VBAD) {
		struct inode *ip = VTOI(vp);

		if (ip->i_delaylen) {
			ins.in_poc.value.ul++;
			(void) ufs_putpages(vp, ip->i_delayoff, ip->i_delaylen,
			    B_ASYNC | B_FREE, cr);
			ip->i_delaylen = 0;
		}
	}

	return (0);
}
static int
ufs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cr,
	struct caller_context *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp = NULL;
	int error = 0;

	ASSERT(RW_READ_HELD(&ip->i_rwlock));

	/*
	 * Mandatory locking needs to be done before ufs_lockfs_begin()
	 * and TRANS_BEGIN_SYNC() calls since mandatory locks can sleep.
	 */
	if (MANDLOCK(vp, ip->i_mode)) {
		/*
		 * ufs_getattr ends up being called by chklock
		 */
		error = chklock(vp, FREAD, uiop->uio_loffset,
		    uiop->uio_resid, uiop->uio_fmode, ct);
		if (error)
			goto out;
	}

	ufsvfsp = ip->i_ufsvfs;
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READ_MASK);
	if (error)
		goto out;

	/*
	 * In the case that a directory is opened for reading as a file
	 * (e.g. "cat .") with the O_RSYNC, O_SYNC and O_DSYNC flags set,
	 * the locking order had to be changed to avoid a deadlock with
	 * an update taking place on that directory at the same time.
	 */
	if ((ip->i_mode & IFMT) == IFDIR) {
		rw_enter(&ip->i_contents, RW_READER);
		error = rdip(ip, uiop, ioflag, cr);
		rw_exit(&ip->i_contents);

		if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
		    TRANS_ISTRANS(ufsvfsp)) {
			rw_exit(&ip->i_rwlock);
			TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC,
			    TOP_READ_SIZE, &error);
			TRANS_END_SYNC(ufsvfsp, &error, TOP_READ_SYNC,
			    TOP_READ_SIZE);
			rw_enter(&ip->i_rwlock, RW_READER);
		}
		if (ulp)
			ufs_lockfs_end(ulp);
		goto out;
	}

	/*
	 * Only transact reads to files opened for sync-read and
	 * sync-write on a file system that is not write locked.
	 *
	 * The ``not write locked'' check prevents problems with
	 * enabling/disabling logging on a busy file system.  E.g.,
	 * logging exists at the beginning of the read but does not
	 * at the end.
	 */
	if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
	    TRANS_ISTRANS(ufsvfsp)) {
		TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC,
		    TOP_READ_SIZE, &error);
	}

	rw_enter(&ip->i_contents, RW_READER);
	error = rdip(ip, uiop, ioflag, cr);
	rw_exit(&ip->i_contents);

	if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
	    TRANS_ISTRANS(ufsvfsp)) {
		TRANS_END_SYNC(ufsvfsp, &error, TOP_READ_SYNC,
		    TOP_READ_SIZE);
	}

	if (ulp)
		ufs_lockfs_end(ulp);
out:
	return (error);
}
extern int	ufs_HW;		/* high water mark */
extern int	ufs_LW;		/* low water mark */
int	ufs_WRITES = 1;		/* XXX - enable/disable */
int	ufs_throttles = 0;	/* throttling count */
int	ufs_allow_shared_writes = 1;	/* directio shared writes */

static int
ufs_check_rewrite(struct inode *ip, struct uio *uiop, int ioflag)
{
	int	shared_write;

	/*
	 * If the FDSYNC flag is set then ignore the global
	 * ufs_allow_shared_writes in this case.
	 */
	shared_write = (ioflag & FDSYNC) | ufs_allow_shared_writes;

	/*
	 * Filter to determine if this request is suitable as a
	 * concurrent rewrite. This write must not allocate blocks
	 * by extending the file or filling in holes. No use trying
	 * through FSYNC descriptors as the inode will be synchronously
	 * updated after the write. The uio structure has not yet been
	 * checked for sanity, so assume nothing.
	 */
	return (((ip->i_mode & IFMT) == IFREG) && !(ioflag & FAPPEND) &&
	    (uiop->uio_loffset >= 0) &&
	    (uiop->uio_loffset < ip->i_size) && (uiop->uio_resid > 0) &&
	    ((ip->i_size - uiop->uio_loffset) >= uiop->uio_resid) &&
	    !(ioflag & FSYNC) && !bmap_has_holes(ip) &&
	    shared_write);
}
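
/*
 * Illustrative example of the filter above: with i_size = 1M, a write of
 * uio_resid = 4K at uio_loffset = 64K qualifies as a concurrent rewrite
 * (it lies entirely inside [0, i_size) and allocates nothing), while the
 * same write at uio_loffset = 1M - 2K does not, because
 * i_size - uio_loffset < uio_resid.
 */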
static int
ufs_write(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp;
	int exclusive, rewriteflg;
	int directio_status;
	int retry = 1;
	int error, resv, resid = 0;
	long start_resid = uiop->uio_resid;

	ASSERT(RW_LOCK_HELD(&ip->i_rwlock));

retry_mandlock:
	/*
	 * Mandatory locking needs to be done before ufs_lockfs_begin()
	 * and TRANS_BEGIN_[A]SYNC() calls since mandatory locks can sleep.
	 * Check for forced unmounts normally done in ufs_lockfs_begin().
	 */
	if ((ufsvfsp = ip->i_ufsvfs) == NULL) {
		error = EIO;
		goto out;
	}

	if (MANDLOCK(vp, ip->i_mode)) {

		ASSERT(RW_WRITE_HELD(&ip->i_rwlock));

		/*
		 * ufs_getattr ends up being called by chklock
		 */
		error = chklock(vp, FWRITE, uiop->uio_loffset,
		    uiop->uio_resid, uiop->uio_fmode, ct);
		if (error)
			goto out;
	}

	/* i_rwlock can change in chklock */
	exclusive = rw_write_held(&ip->i_rwlock);
	rewriteflg = ufs_check_rewrite(ip, uiop, ioflag);

	/*
	 * Check for fast-path special case of directio re-writes.
	 */
	if ((ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) &&
	    !exclusive && rewriteflg) {

		error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
		if (error)
			goto out;

		rw_enter(&ip->i_contents, RW_READER);
		error = ufs_directio_write(ip, uiop, ioflag, 1, cr,
		    &directio_status);
		if (directio_status == DIRECTIO_SUCCESS) {
			uint_t i_flag_save;

			if (start_resid != uiop->uio_resid)
				error = 0;
			/*
			 * Special treatment of access times for re-writes.
			 * If IMOD is not already set, then convert it
			 * to IMODACC for this operation. This defers
			 * entering a delta into the log until the inode
			 * is flushed. This mimics what is done for read
			 * operations and inode access time.
			 */
			mutex_enter(&ip->i_tlock);
			i_flag_save = ip->i_flag;
			ip->i_flag |= IUPD | ICHG;

			if ((i_flag_save & IMOD) == 0) {
				ip->i_flag |= IMODACC;
			}
			mutex_exit(&ip->i_tlock);
			rw_exit(&ip->i_contents);
			if (ulp)
				ufs_lockfs_end(ulp);
			goto out;
		}
		rw_exit(&ip->i_contents);
		if (ulp)
			ufs_lockfs_end(ulp);
	}

	if (!exclusive && !rw_tryupgrade(&ip->i_rwlock)) {
		rw_exit(&ip->i_rwlock);
		rw_enter(&ip->i_rwlock, RW_WRITER);
		/*
		 * Mandatory locking could have been enabled
		 * after dropping the i_rwlock.
		 */
		if (MANDLOCK(vp, ip->i_mode))
			goto retry_mandlock;
	}

	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
	if (error)
		goto out;

	/*
	 * Amount of log space needed for this write
	 */
	if (!rewriteflg || !(ioflag & FDSYNC))
		TRANS_WRITE_RESV(ip, uiop, ulp, &resv, &resid);

	/*
	 * Throttle writes.
	 */
	if (ufs_WRITES && (ip->i_writes > ufs_HW)) {
		mutex_enter(&ip->i_tlock);
		while (ip->i_writes > ufs_HW) {
			ufs_throttles++;
			cv_wait(&ip->i_wrcv, &ip->i_tlock);
		}
		mutex_exit(&ip->i_tlock);
	}

	/*
	 * If the write is a rewrite there is no need to open a transaction
	 * if the FDSYNC flag is set and not the FSYNC.  In this case just
	 * set the IMODACC flag to do the update at a later time,
	 * thus avoiding the overhead of the logging transaction that is
	 * not required.
	 */
	if (ioflag & (FSYNC|FDSYNC)) {
		if (ulp && rewriteflg) {
			uint_t i_flag_save;

			rw_enter(&ip->i_contents, RW_READER);
			mutex_enter(&ip->i_tlock);
			i_flag_save = ip->i_flag;
			ip->i_flag |= IUPD | ICHG;

			if ((i_flag_save & IMOD) == 0) {
				ip->i_flag |= IMODACC;
			}
			mutex_exit(&ip->i_tlock);
			rw_exit(&ip->i_contents);
		} else if (ulp) {
			TRANS_BEGIN_SYNC(ufsvfsp, TOP_WRITE_SYNC,
			    resv, &error);
		}
	} else if (ulp) {
		TRANS_BEGIN_ASYNC(ufsvfsp, TOP_WRITE, resv);
	}

	/*
	 * Write the file.
	 */
	rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
	rw_enter(&ip->i_contents, RW_WRITER);
	if ((ioflag & FAPPEND) != 0 && (ip->i_mode & IFMT) == IFREG) {
		/*
		 * In append mode start at end of file.
		 */
		uiop->uio_loffset = ip->i_size;
	}

	/*
	 * Mild optimisation, don't call ufs_trans_write() unless we have to.
	 * Also, suppress file system full messages if we will retry.
	 */
	if (retry)
		ip->i_flag |= IQUIET;
	if (resid) {
		TRANS_WRITE(ip, uiop, ioflag, error, ulp, cr, resv, resid);
	} else {
		error = wrip(ip, uiop, ioflag, cr);
	}
	ip->i_flag &= ~IQUIET;

	rw_exit(&ip->i_contents);
	rw_exit(&ufsvfsp->vfs_dqrwlock);

	if (ulp) {
		if (ioflag & (FSYNC|FDSYNC)) {
			if (!rewriteflg) {
				int terr = 0;

				TRANS_END_SYNC(ufsvfsp, &terr,
				    TOP_WRITE_SYNC, resv);
				if (error == 0)
					error = terr;
			}
		} else {
			TRANS_END_ASYNC(ufsvfsp, TOP_WRITE, resv);
		}
		ufs_lockfs_end(ulp);
	}
out:
	if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		/*
		 * Any blocks tied up in pending deletes?
		 */
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		goto retry_mandlock;
	}

	if (error == ENOSPC && (start_resid != uiop->uio_resid))
		error = 0;

	return (error);
}
/*
 * Don't cache write blocks to files with the sticky bit set.
 * Used to keep swap files from blowing the page cache on a server.
 */

/*
 * Free behind hacks.  The pager is busted.
 * XXX - need to pass the information down to writedone() in a flag like B_SEQ
 * or B_FREE_IF_TIGHT_ON_MEMORY.
 */
int	freebehind = 1;
int	smallfile = 32 * 1024;
uoff_t	smallfile64 = 32 * 1024;

/*
 * While we should, in most cases, cache the pages for write, we
 * may also want to cache the pages for read as long as they are
 * frequently re-usable.
 *
 * If cache_read_ahead = 1, the pages for read will go to the tail
 * of the cache list when they are released, otherwise go to the head.
 */
int	cache_read_ahead = 0;

/*
 * Freebehind exists so that as we read large files sequentially we
 * don't consume most of memory with pages from a few files. It takes
 * longer to re-read multiple small files from disk than it does to read
 * one large file sequentially.  As system memory grows customers need
 * to retain bigger chunks of files in memory.  The advent of the
 * cachelist opens up the possibility of freeing pages to the head or
 * tail of the list.
 *
 * Not freeing a page is a bet that the page will be read again before
 * its segmap slot is needed for something else. If we lose the bet,
 * it means some other thread is burdened with the page free we did
 * not do. If we win we save a free and reclaim.
 *
 * Freeing it at the tail vs the head of cachelist is a bet that the
 * page will survive until the next read.  It's also saying that this
 * page is more likely to be re-used than a page freed some time ago
 * and never reclaimed.
 *
 * Freebehind maintains a range of file offsets [smallfile1; smallfile2]:
 *
 * 0 < offset < smallfile1 : pages are not freed.
 * smallfile1 < offset < smallfile2 : pages freed to tail of cachelist.
 * smallfile2 < offset : pages freed to head of cachelist.
 *
 * The range is computed at most once per second and depends on
 * freemem and ncpus_online. Both parameters are bounded to be
 * >= smallfile && >= smallfile64.
 *
 * smallfile1 = (free memory / ncpu) / 1000
 * smallfile2 = (free memory / ncpu) / 10
 *
 * A few example values:
 *
 *	Free Mem (in Bytes)	[smallfile1; smallfile2]  [smallfile1; smallfile2]
 *				ncpus_online = 4	  ncpus_online = 64
 *	------------------	------------------------  ------------------------
 *	1G			[256K;  25M]		  [32K;   1.5M]
 *	10G			[2.5M; 250M]		  [156K;   15M]
 *	100G			[25M;  2.5G]		  [1.5M;  150M]
 */

#define	SMALLFILE1_D	1000
#define	SMALLFILE2_D	10
static uoff_t smallfile1 = 32 * 1024;
static uoff_t smallfile2 = 32 * 1024;
static clock_t smallfile_update = 0;	/* lbolt value of when to recompute */
uint_t smallfile1_d = SMALLFILE1_D;
uint_t smallfile2_d = SMALLFILE2_D;
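
/*
 * Worked example (illustrative only; the numbers follow the table above):
 * with 1G of free memory and ncpus_online = 4,
 *	percpufreeb = ptob(freemem) / ncpus_online = 1G / 4    = 256M
 *	smallfile1  = percpufreeb / SMALLFILE1_D   = 256M / 1000 ~= 256K
 *	smallfile2  = percpufreeb / SMALLFILE2_D   = 256M / 10   ~= 25M
 * so rdip() frees pages to the tail of the cachelist for sequential read
 * offsets beyond 256K, and to the head beyond 25M.
 */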
/*
 * wrip does the real work of write requests for ufs.
 */
int
wrip(struct inode *ip, struct uio *uio, int ioflag, struct cred *cr)
{
	rlim64_t limit = uio->uio_llimit;
	struct vnode *vp = ITOV(ip);
	struct fs *fs;
	struct ufsvfs *ufsvfsp;
	caddr_t base;
	long start_resid = uio->uio_resid;	/* save starting resid */
	long premove_resid;			/* resid before uiomove() */
	uoff_t old_i_size;
	int newpage;
	int iupdat_flag, directio_status;
	uint_t flags;
	int n, on, mapon;
	int error, pagecreate;
	int do_dqrwlock;			/* drop/reacquire vfs_dqrwlock */
	int type;
	offset_t off;
	daddr_t iblocks;
	int new_iblocks;
	/*
	 * ip->i_size is incremented before the uiomove
	 * is done on a write.  If the move fails (bad user
	 * address) reset ip->i_size.
	 * The better way would be to increment ip->i_size
	 * only if the uiomove succeeds.
	 */
	int i_size_changed = 0;
	int i_seq_needed = 0;

	/*
	 * check for forced unmount - should not happen as
	 * the request passed the lockfs checks.
	 */
	if ((ufsvfsp = ip->i_ufsvfs) == NULL)
		return (EIO);

	fs = ip->i_fs;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	/* check for valid filetype */
	type = ip->i_mode & IFMT;
	if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
	    (type != IFLNK) && (type != IFSHAD)) {
		return (EIO);
	}

	/*
	 * the actual limit of UFS file size
	 */
	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	if (uio->uio_loffset >= limit) {
		proc_t *p = ttoproc(curthread);

		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
		    p, RCA_UNSAFE_SIGINFO);
		mutex_exit(&p->p_lock);
		return (EFBIG);
	}

	/*
	 * if largefiles are disallowed, the limit is
	 * the pre-largefiles value of 2GB
	 */
	if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
		limit = MIN(UFS_MAXOFFSET_T, limit);
	else
		limit = MIN(MAXOFF32_T, limit);

	if (uio->uio_loffset < 0) {
		return (EINVAL);
	}
	if (uio->uio_resid == 0) {
		return (0);
	}

	if (uio->uio_loffset >= limit)
		return (EFBIG);

	ip->i_flag |= INOACC;	/* don't update ref time in getpage */

	if (ioflag & (FSYNC|FDSYNC)) {
		ip->i_flag |= ISYNC;
		iupdat_flag = 1;
	}

	if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
		uio->uio_llimit = limit;
		error = ufs_directio_write(ip, uio, ioflag, 0, cr,
		    &directio_status);
		/*
		 * If ufs_directio wrote to the file or set the flags,
		 * we need to update i_seq, but it may be deferred.
		 */
		if (start_resid != uio->uio_resid ||
		    (ip->i_flag & (ICHG|IUPD))) {
			i_seq_needed = 1;
			ip->i_flag |= ISEQ;
		}
		if (directio_status == DIRECTIO_SUCCESS)
			goto out;
	}

	/*
	 * Behavior with respect to dropping/reacquiring vfs_dqrwlock:
	 *
	 * o shadow inodes: vfs_dqrwlock is not held at all
	 * o quota updates: vfs_dqrwlock is read or write held
	 * o other updates: vfs_dqrwlock is read held
	 *
	 * The first case is the only one where we do not hold
	 * vfs_dqrwlock at all while entering wrip().
	 * We must make sure not to downgrade/drop vfs_dqrwlock if we
	 * have it as writer, i.e. if we are updating the quota inode.
	 * There is no potential deadlock scenario in this case as
	 * ufs_getpage() takes care of this and avoids reacquiring
	 * vfs_dqrwlock in that case.
	 *
	 * This check is done here since the above conditions do not change
	 * and we possibly loop below, so save a few cycles.
	 */
	if ((type == IFSHAD) ||
	    (rw_owner(&ufsvfsp->vfs_dqrwlock) == curthread)) {
		do_dqrwlock = 0;
	} else {
		do_dqrwlock = 1;
	}

	/*
	 * Large Files: We cast MAXBMASK to offset_t
	 * in order to mask out the higher bits. Since offset_t
	 * is a signed value, the high order bit set in MAXBMASK
	 * value makes it do the right thing by having all bits 1
	 * in the higher word. May be removed for _SOLARIS64_.
	 */
	do {
		uoff_t uoff = uio->uio_loffset;
		off = uoff & (offset_t)MAXBMASK;
		mapon = (int)(uoff & (offset_t)MAXBOFFSET);
		on = (int)blkoff(fs, uoff);
		n = (int)MIN(fs->fs_bsize - on, uio->uio_resid);
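
		/*
		 * Worked example (illustrative; assumes MAXBSIZE == 8K and
		 * fs_bsize == 8K): for uoff == 0x3456,
		 *	off   = 0x3456 & ~(8K - 1) = 0x2000  (window base)
		 *	mapon = 0x3456 &  (8K - 1) = 0x1456  (offset in window)
		 *	on    = blkoff(fs, 0x3456) = 0x1456  (offset in block)
		 *	n     = MIN(8K - 0x1456, uio_resid)  (bytes this pass)
		 */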
		if (type == IFREG && uoff + n >= limit) {
			if (uoff >= limit) {
				error = EFBIG;
				goto out;
			}
			/*
			 * since uoff + n >= limit,
			 * therefore n >= limit - uoff, and n is an int
			 * so it is safe to cast it to an int
			 */
			n = (int)(limit - (rlim64_t)uoff);
		}
		if (uoff + n > ip->i_size) {
			/*
			 * We are extending the length of the file.
			 * bmap is used so that we are sure that
			 * if we need to allocate new blocks, that it
			 * is done here before we up the file size.
			 */
			error = bmap_write(ip, uoff, (int)(on + n),
			    mapon == 0, NULL, cr);
			/*
			 * bmap_write never drops i_contents so if
			 * the flags are set it changed the file.
			 */
			if (ip->i_flag & (ICHG|IUPD)) {
				i_seq_needed = 1;
				ip->i_flag |= ISEQ;
			}
			if (error)
				break;
			/*
			 * There is a window of vulnerability here.
			 * The sequence of operations: allocate file
			 * system blocks, uiomove the data into pages,
			 * and then update the size of the file in the
			 * inode, must happen atomically.  However, due
			 * to current locking constraints, this can not
			 * be done.
			 */
			ASSERT(ip->i_writer == NULL);
			ip->i_writer = curthread;
			i_size_changed = 1;

			/*
			 * If we are writing from the beginning of
			 * the mapping, we can just create the
			 * pages without having to read them.
			 */
			pagecreate = (mapon == 0);
		} else if (n == MAXBSIZE) {
			/*
			 * Going to do a whole mappings worth,
			 * so we can just create the pages w/o
			 * having to read them in.  But before
			 * we do that, we need to make sure any
			 * needed blocks are allocated first.
			 */
			iblocks = ip->i_blocks;
			error = bmap_write(ip, uoff, (int)(on + n),
			    BI_ALLOC_ONLY, NULL, cr);
			/*
			 * bmap_write never drops i_contents so if
			 * the flags are set it changed the file.
			 */
			if (ip->i_flag & (ICHG|IUPD)) {
				i_seq_needed = 1;
				ip->i_flag |= ISEQ;
			}
			if (error)
				break;
			pagecreate = 1;
			/*
			 * check if the newly created page needed the
			 * allocation of new disk blocks.
			 */
			if (iblocks == ip->i_blocks)
				new_iblocks = 0; /* no new blocks allocated */
		} else {
			pagecreate = 0;
			/*
			 * In sync mode flush the indirect blocks which
			 * may have been allocated and not written on
			 * disk. In above cases bmap_write will allocate
			 */
			if (ioflag & (FSYNC|FDSYNC)) {
				error = ufs_indirblk_sync(ip, uoff);
				if (error)
					break;
			}
		}

		/*
		 * At this point we can enter ufs_getpage() in one
		 * of two ways:
		 * 1) segmap_getmapflt() calls ufs_getpage() when the
		 *    forcefault parameter is true (pagecreate == 0)
		 * 2) uiomove() causes a page fault.
		 *
		 * We have to drop the contents lock to prevent the VM
		 * system from trying to reacquire it in ufs_getpage()
		 * should the uiomove cause a pagefault.
		 *
		 * We have to drop the reader vfs_dqrwlock here as well.
		 */
		rw_exit(&ip->i_contents);
		if (do_dqrwlock) {
			ASSERT(RW_LOCK_HELD(&ufsvfsp->vfs_dqrwlock));
			ASSERT(!(RW_WRITE_HELD(&ufsvfsp->vfs_dqrwlock)));
			rw_exit(&ufsvfsp->vfs_dqrwlock);
		}

		newpage = 0;
		premove_resid = uio->uio_resid;

		/*
		 * Touch the page and fault it in if it is not in core
		 * before segmap_getmapflt or vpm_data_copy can lock it.
		 * This is to avoid the deadlock if the buffer is mapped
		 * to the same file through mmap which we want to write.
		 */
		uio_prefaultpages((long)n, uio);

		if (vpm_enable) {
			/*
			 * Copy data. If new pages are created, part of
			 * the page that is not written will be initialized
			 * with zeros.
			 */
			error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
			    uio, !pagecreate, &newpage, 0, S_WRITE);
		} else {
			base = segmap_getmapflt(segkmap, vp, (off + mapon),
			    (uint_t)n, !pagecreate, S_WRITE);

			/*
			 * segmap_pagecreate() returns 1 if it calls
			 * page_create_va() to allocate any pages.
			 */
			if (pagecreate)
				newpage = segmap_pagecreate(segkmap, base,
				    (size_t)n, 0);

			error = uiomove(base + mapon, (long)n, UIO_WRITE, uio);
		}

		/*
		 * If "newpage" is set, then a new page was created and it
		 * does not contain valid data, so it needs to be initialized
		 * at this point.
		 * Otherwise the page contains old data, which was overwritten
		 * partially or as a whole in uiomove.
		 * If there is only one iovec structure within uio, then
		 * on error uiomove will not be able to update uio->uio_loffset
		 * and we would zero the whole page here!
		 *
		 * If uiomove fails because of an error, the old valid data
		 * is kept instead of filling the rest of the page with zero's.
		 */
		if (!vpm_enable && newpage &&
		    uio->uio_loffset < roundup(off + mapon + n, PAGESIZE)) {
			/*
			 * We created pages w/o initializing them completely,
			 * thus we need to zero the part that wasn't set up.
			 * This happens on most EOF write cases and if
			 * we had some sort of error during the uiomove.
			 */
			int nzero, nmoved;

			nmoved = (int)(uio->uio_loffset - (off + mapon));
			ASSERT(nmoved >= 0 && nmoved <= n);
			nzero = roundup(on + n, PAGESIZE) - nmoved;
			ASSERT(nzero > 0 && mapon + nmoved + nzero <= MAXBSIZE);
			(void) kzero(base + mapon + nmoved, (uint_t)nzero);
		}

		/*
		 * Unlock the pages allocated by page_create_va()
		 * in segmap_pagecreate()
		 */
		if (!vpm_enable && newpage)
			segmap_pageunlock(segkmap, base, (size_t)n, S_WRITE);

		/*
		 * If the size of the file changed, then update the
		 * size field in the inode now.  This can't be done
		 * before the call to segmap_pageunlock or there is
		 * a potential deadlock with callers to ufs_putpage().
		 * They will be holding i_contents and trying to lock
		 * a page, while this thread is holding a page locked
		 * and trying to acquire i_contents.
		 */
		if (i_size_changed) {
			rw_enter(&ip->i_contents, RW_WRITER);
			old_i_size = ip->i_size;
			UFS_SET_ISIZE(uoff + n, ip);
			TRANS_INODE(ufsvfsp, ip);
			/*
			 * file has grown larger than 2GB. Set flag
			 * in superblock to indicate this, if it
			 * is not already set.
			 */
			if ((ip->i_size > MAXOFF32_T) &&
			    !(fs->fs_flags & FSLARGEFILES)) {
				ASSERT(ufsvfsp->vfs_lfflags & UFS_LARGEFILES);
				mutex_enter(&ufsvfsp->vfs_lock);
				fs->fs_flags |= FSLARGEFILES;
				ufs_sbwrite(ufsvfsp);
				mutex_exit(&ufsvfsp->vfs_lock);
			}
			mutex_enter(&ip->i_tlock);
			ip->i_writer = NULL;
			cv_broadcast(&ip->i_wrcv);
			mutex_exit(&ip->i_tlock);
			rw_exit(&ip->i_contents);
		}

		if (error) {
			/*
			 * If we failed on a write, we may have already
			 * allocated file blocks as well as pages.  It's
			 * hard to undo the block allocation, but we must
			 * be sure to invalidate any pages that may have
			 * been allocated.
			 *
			 * If the page was created without initialization
			 * then we must check if it should be possible
			 * to destroy the new page and to keep the old data
			 * on the disk.
			 *
			 * It is possible to destroy the page without
			 * having to write back its contents only when
			 * - the size of the file keeps unchanged
			 * - bmap_write() did not allocate new disk blocks
			 *   it is possible to create big files using "seek" and
			 *   write to the end of the file. A "write" to a
			 *   position before the end of the file would not
			 *   change the size of the file but it would allocate
			 *   new disk blocks.
			 * - uiomove intended to overwrite the whole page.
			 * - a new page was created (newpage == 1).
			 */

			if (i_size_changed == 0 && new_iblocks == 0 &&
			    newpage) {

				/* unwind what uiomove eventually last did */
				uio->uio_resid = premove_resid;

				/*
				 * destroy the page, do not write ambiguous
				 * data to the disk.
				 */
				flags = SM_DESTROY;
			} else {
				/*
				 * write the page back to the disk, if dirty,
				 * and remove the page from the cache.
				 */
				flags = SM_INVAL;
			}

			if (vpm_enable) {
				/*
				 * Flush pages.
				 */
				(void) vpm_sync_pages(vp, off, n, flags);
			} else {
				(void) segmap_release(segkmap, base, flags);
			}
		} else {
			flags = 0;
			/*
			 * Force write back for synchronous write cases.
			 */
			if ((ioflag & (FSYNC|FDSYNC)) || type == IFDIR) {
				/*
				 * If the sticky bit is set but the
				 * execute bit is not set, we do a
				 * synchronous write back and free
				 * the page when done.  We set up swap
				 * files to be handled this way to
				 * prevent servers from keeping around
				 * the client's swap pages too long.
				 * XXX - there ought to be a better way.
				 */
				if (IS_SWAPVP(vp)) {
					flags = SM_WRITE | SM_FREE |
					    SM_DONTNEED;
				} else {
					flags = SM_WRITE;
				}
			} else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
				/*
				 * Have written a whole block.
				 * Start an asynchronous write and
				 * mark the buffer to indicate that
				 * it won't be needed again soon.
				 */
				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
			}

			if (vpm_enable) {
				/*
				 * Flush pages.
				 */
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}

			/*
			 * If the operation failed and is synchronous,
			 * then we need to unwind what uiomove() last
			 * did so we can potentially return an error to
			 * the caller.  If this write operation was
			 * done in two pieces and the first succeeded,
			 * then we won't return an error for the second
			 * piece that failed.  However, we only want to
			 * return a resid value that reflects what was
			 * really done.
			 *
			 * Failures for non-synchronous operations can
			 * be ignored since the page subsystem will
			 * retry the operation until it succeeds or the
			 * file system is unmounted.
			 */
			if (error) {
				if ((ioflag & (FSYNC | FDSYNC)) ||
				    type == IFDIR) {
					uio->uio_resid = premove_resid;
				}
			}
		}

		/*
		 * Re-acquire contents lock.
		 * If it was dropped, reacquire reader vfs_dqrwlock as well.
		 */
		if (do_dqrwlock)
			rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
		rw_enter(&ip->i_contents, RW_WRITER);

		/*
		 * If the uiomove() failed or if a synchronous
		 * page push failed, fix up i_size.
		 */
		if (error) {
			if (i_size_changed) {
				/*
				 * The uiomove failed, and we
				 * allocated blocks, so get rid
				 * of them.
				 */
				(void) ufs_itrunc(ip, old_i_size, 0, cr);
			}
		} else {
			/*
			 * XXX - Can this be out of the loop?
			 */
			ip->i_flag |= IUPD | ICHG;
			/*
			 * Only do one increase of i_seq for multiple
			 * pieces.  Because we drop locks, record
			 * the fact that we changed the timestamp and
			 * are deferring the increase in case another thread
			 * pushes our timestamp update.
			 */
			i_seq_needed = 1;
			ip->i_flag |= ISEQ;
			if (i_size_changed)
				ip->i_flag |= IATTCHG;
			if ((ip->i_mode & (IEXEC | (IEXEC >> 3) |
			    (IEXEC >> 6))) != 0 &&
			    (ip->i_mode & (ISUID | ISGID)) != 0 &&
			    secpolicy_vnode_setid_retain(cr,
			    (ip->i_mode & ISUID) != 0 && ip->i_uid == 0) != 0) {
				/*
				 * Clear Set-UID & Set-GID bits on
				 * successful write if not privileged
				 * and at least one of the execute bits
				 * is set.  If we always clear Set-GID,
				 * mandatory file and record locking is
				 * unuseable.
				 */
				ip->i_mode &= ~(ISUID | ISGID);
			}
		}
		/*
		 * In the case the FDSYNC flag is set and this is a
		 * "rewrite" we won't log a delta.
		 * The FSYNC flag overrides all cases.
		 */
		if (!ufs_check_rewrite(ip, uio, ioflag) || !(ioflag & FDSYNC)) {
			TRANS_INODE(ufsvfsp, ip);
		}
	} while (error == 0 && uio->uio_resid > 0 && n != 0);

out:
	/*
	 * Make sure i_seq is increased at least once per write
	 */
	if (i_seq_needed) {
		ip->i_seq++;
		ip->i_flag &= ~ISEQ;	/* no longer deferred */
	}

	/*
	 * Inode is updated according to this table -
	 *
	 *			FSYNC	FDSYNC(posix.4)
	 *			--------------------------
	 *	always@		IATTCHG|IBDWRITE
	 *
	 * @ -	If we are doing synchronous write the only time we should
	 *	not be sync'ing the ip here is if we have the stickyhack
	 *	activated, the file is marked with the sticky bit and
	 *	no exec bit, the file length has not been changed and
	 *	no new blocks have been allocated during this write.
	 */

	if ((ip->i_flag & ISYNC) != 0) {
		/*
		 * we have eliminated nosync
		 */
		if ((ip->i_flag & (IATTCHG|IBDWRITE)) ||
		    ((ioflag & FSYNC) && iupdat_flag)) {
			ufs_iupdat(ip, 1);
		}
	}

	/*
	 * If we've already done a partial-write, terminate
	 * the write but return no error unless the error is ENOSPC
	 * because the caller can detect this and free resources and
	 * try again.
	 */
	if ((start_resid != uio->uio_resid) && (error != ENOSPC))
		error = 0;

	ip->i_flag &= ~(INOACC | ISYNC);
	ITIMES_NOLOCK(ip);
	return (error);
}
/*
 * rdip does the real work of read requests for ufs.
 */
int
rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
{
	struct vnode *vp = ITOV(ip);
	struct fs *fs;
	struct ufsvfs *ufsvfsp;
	caddr_t base;
	long oresid = uio->uio_resid;
	uoff_t n, on, mapon;
	offset_t off, diff;
	int type, error = 0;
	int doupdate = 1;
	uint_t flags;
	int dofree, directio_status;
	krw_t rwtype;
	clock_t now;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	ufsvfsp = ip->i_ufsvfs;

	if (ufsvfsp == NULL)
		return (EIO);

	fs = ufsvfsp->vfs_fs;

	/* check for valid filetype */
	type = ip->i_mode & IFMT;
	if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
	    (type != IFLNK) && (type != IFSHAD)) {
		return (EIO);
	}

	if (uio->uio_loffset > UFS_MAXOFFSET_T) {
		error = 0;
		goto out;
	}
	if (uio->uio_loffset < 0) {
		error = EINVAL;
		goto out;
	}
	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}

	if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (fs->fs_ronly == 0) &&
	    (!ufsvfsp->vfs_noatime)) {
		mutex_enter(&ip->i_tlock);
		ip->i_flag |= IACC;
		mutex_exit(&ip->i_tlock);
	}

	if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
		error = ufs_directio_read(ip, uio, cr, &directio_status);
		if (directio_status == DIRECTIO_SUCCESS)
			goto out;
	}

	rwtype = (rw_write_held(&ip->i_contents)?RW_WRITER:RW_READER);

	do {
		uoff_t uoff = uio->uio_loffset;
		off = uoff & (offset_t)MAXBMASK;
		mapon = (uoff_t)(uoff & (offset_t)MAXBOFFSET);
		on = (uoff_t)blkoff(fs, uoff);
		n = MIN((uoff_t)fs->fs_bsize - on,
		    (uoff_t)uio->uio_resid);

		diff = ip->i_size - uoff;

		if (diff < (offset_t)n)
			n = (uoff_t)diff;

		/*
		 * We update smallfile2 and smallfile1 at most every second.
		 */
		now = ddi_get_lbolt();
		if (now >= smallfile_update) {
			uint64_t percpufreeb;
			if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
			if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
			percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
			smallfile1 = percpufreeb / smallfile1_d;
			smallfile2 = percpufreeb / smallfile2_d;
			smallfile1 = MAX(smallfile1, smallfile);
			smallfile1 = MAX(smallfile1, smallfile64);
			smallfile2 = MAX(smallfile1, smallfile2);
			smallfile_update = now + hz;
		}

		dofree = freebehind &&
		    ip->i_nextr == (off & PAGEMASK) && off > smallfile1;

		/*
		 * At this point we can enter ufs_getpage() in one of two
		 * ways:
		 * 1) segmap_getmapflt() calls ufs_getpage() when the
		 *    forcefault parameter is true (value of 1 is passed)
		 * 2) uiomove() causes a page fault.
		 *
		 * We cannot hold onto an i_contents reader lock without
		 * risking deadlock in ufs_getpage() so drop a reader lock.
		 * The ufs_getpage() dolock logic already allows for a
		 * thread holding i_contents as writer to work properly
		 * so we keep a writer lock.
		 */
		if (rwtype == RW_READER)
			rw_exit(&ip->i_contents);

		if (vpm_enable) {
			error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
			    uio, 1, NULL, 0, S_READ);
		} else {
			base = segmap_getmapflt(segkmap, vp, (off + mapon),
			    (uint_t)n, 1, S_READ);
			error = uiomove(base + mapon, (long)n, UIO_READ, uio);
		}

		flags = 0;
		if (!error) {
			/*
			 * If reading sequential we won't need this
			 * buffer again soon.  For offsets in range
			 * [smallfile1, smallfile2] release the pages
			 * at the tail of the cache list, larger
			 * offsets are released at the head.
			 */
			if (dofree) {
				flags = SM_FREE | SM_ASYNC;
				if ((cache_read_ahead == 0) &&
				    (off > smallfile2))
					flags |= SM_DONTNEED;
			}
			/*
			 * In POSIX SYNC (FSYNC and FDSYNC) read mode,
			 * we want to make sure that the page which has
			 * been read, is written on disk if it is dirty.
			 * And corresponding indirect blocks should also
			 * be flushed out.
			 */
			if ((ioflag & FRSYNC) && (ioflag & (FSYNC|FDSYNC))) {
				flags &= ~SM_ASYNC;
				flags |= SM_WRITE;
			}
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, flags);
			} else {
				(void) segmap_release(segkmap, base, flags);
			}
		}

		if (rwtype == RW_READER)
			rw_enter(&ip->i_contents, rwtype);
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
out:
	/*
	 * Inode is updated according to this table if FRSYNC is set.
	 *
	 *			FSYNC	FDSYNC(posix.4)
	 *			--------------------------
	 *	always		IATTCHG|IBDWRITE
	 *
	 * The inode is not updated if we're logging and the inode is a
	 * directory with FRSYNC, FSYNC and FDSYNC flags set.
	 */
	if (ioflag & FRSYNC) {
		if (TRANS_ISTRANS(ufsvfsp) && ((ip->i_mode & IFMT) == IFDIR)) {
			doupdate = 0;
		}
		if (doupdate) {
			if ((ioflag & FSYNC) ||
			    ((ioflag & FDSYNC) &&
			    (ip->i_flag & (IATTCHG|IBDWRITE)))) {
				ufs_iupdat(ip, 1);
			}
		}
	}
	/*
	 * If we've already done a partial read, terminate
	 * the read but return no error.
	 */
	if (oresid != uio->uio_resid)
		error = 0;

	return (error);
}
/* ARGSUSED */
static int
ufs_ioctl(
	struct vnode	*vp,
	int		cmd,
	intptr_t	arg,
	int		flag,
	struct cred	*cr,
	int		*rvalp,
	caller_context_t *ct)
{
	struct lockfs lockfs, lockfs_out;
	struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
	char *comment, *original_comment;
	struct fs *fs;
	struct ulockfs *ulp;
	offset_t off;
	extern int maxphys;
	int error;
	int issync;
	int trans_size;

	/*
	 * forcibly unmounted
	 */
	if (ufsvfsp == NULL || vp->v_vfsp == NULL ||
	    vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);
	fs = ufsvfsp->vfs_fs;

	if (cmd == Q_QUOTACTL) {
		error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_QUOTA_MASK);
		if (error)
			return (error);

		if (ulp) {
			TRANS_BEGIN_ASYNC(ufsvfsp, TOP_QUOTA,
			    TOP_SETQUOTA_SIZE(fs));
		}

		error = quotactl(vp, arg, flag, cr);

		if (ulp) {
			TRANS_END_ASYNC(ufsvfsp, TOP_QUOTA,
			    TOP_SETQUOTA_SIZE(fs));
			ufs_lockfs_end(ulp);
		}
		return (error);
	}

	switch (cmd) {
	case _FIOLFS:
		/*
		 * file system locking
		 */
		if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
			return (EPERM);

		if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
			if (copyin((caddr_t)arg, &lockfs,
			    sizeof (struct lockfs)))
				return (EFAULT);
		}
#ifdef _SYSCALL32_IMPL
		else {
			struct lockfs32 lockfs32;
			/* Translate ILP32 lockfs to LP64 lockfs */
			if (copyin((caddr_t)arg, &lockfs32,
			    sizeof (struct lockfs32)))
				return (EFAULT);
			lockfs.lf_lock = (ulong_t)lockfs32.lf_lock;
			lockfs.lf_flags = (ulong_t)lockfs32.lf_flags;
			lockfs.lf_key = (ulong_t)lockfs32.lf_key;
			lockfs.lf_comlen = (ulong_t)lockfs32.lf_comlen;
			lockfs.lf_comment =
			    (caddr_t)(uintptr_t)lockfs32.lf_comment;
		}
#endif /* _SYSCALL32_IMPL */

		if (lockfs.lf_comlen) {
			if (lockfs.lf_comlen > LOCKFS_MAXCOMMENTLEN)
				return (ENAMETOOLONG);
			comment =
			    kmem_alloc(lockfs.lf_comlen, KM_SLEEP);
			if (copyin(lockfs.lf_comment, comment,
			    lockfs.lf_comlen)) {
				kmem_free(comment, lockfs.lf_comlen);
				return (EFAULT);
			}
			original_comment = lockfs.lf_comment;
			lockfs.lf_comment = comment;
		}
		if ((error = ufs_fiolfs(vp, &lockfs, 0)) == 0) {
			lockfs.lf_comment = original_comment;

			if ((flag & DATAMODEL_MASK) ==
			    DATAMODEL_NATIVE) {
				(void) copyout(&lockfs, (caddr_t)arg,
				    sizeof (struct lockfs));
			}
#ifdef _SYSCALL32_IMPL
			else {
				struct lockfs32 lockfs32;
				/* Translate LP64 to ILP32 lockfs */
				lockfs32.lf_lock =
				    (uint32_t)lockfs.lf_lock;
				lockfs32.lf_flags =
				    (uint32_t)lockfs.lf_flags;
				lockfs32.lf_key =
				    (uint32_t)lockfs.lf_key;
				lockfs32.lf_comlen =
				    (uint32_t)lockfs.lf_comlen;
				lockfs32.lf_comment =
				    (uint32_t)(uintptr_t)
				    lockfs.lf_comment;
				(void) copyout(&lockfs32, (caddr_t)arg,
				    sizeof (struct lockfs32));
			}
#endif /* _SYSCALL32_IMPL */
		}
		if (lockfs.lf_comlen)
			kmem_free(comment, lockfs.lf_comlen);
		return (error);

	case _FIOLFSS:
		/*
		 * get file system locking status
		 */
		if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
			if (copyin((caddr_t)arg, &lockfs,
			    sizeof (struct lockfs)))
				return (EFAULT);
		}
#ifdef _SYSCALL32_IMPL
		else {
			struct lockfs32 lockfs32;
			/* Translate ILP32 lockfs to LP64 lockfs */
			if (copyin((caddr_t)arg, &lockfs32,
			    sizeof (struct lockfs32)))
				return (EFAULT);
			lockfs.lf_lock = (ulong_t)lockfs32.lf_lock;
			lockfs.lf_flags = (ulong_t)lockfs32.lf_flags;
			lockfs.lf_key = (ulong_t)lockfs32.lf_key;
			lockfs.lf_comlen = (ulong_t)lockfs32.lf_comlen;
			lockfs.lf_comment =
			    (caddr_t)(uintptr_t)lockfs32.lf_comment;
		}
#endif /* _SYSCALL32_IMPL */

		if (error = ufs_fiolfss(vp, &lockfs_out))
			return (error);
		lockfs.lf_lock = lockfs_out.lf_lock;
		lockfs.lf_key = lockfs_out.lf_key;
		lockfs.lf_flags = lockfs_out.lf_flags;
		lockfs.lf_comlen = MIN(lockfs.lf_comlen,
		    lockfs_out.lf_comlen);

		if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
			if (copyout(&lockfs, (caddr_t)arg,
			    sizeof (struct lockfs)))
				return (EFAULT);
		}
#ifdef _SYSCALL32_IMPL
		else {
			/* Translate LP64 to ILP32 lockfs */
			struct lockfs32 lockfs32;
			lockfs32.lf_lock = (uint32_t)lockfs.lf_lock;
			lockfs32.lf_flags = (uint32_t)lockfs.lf_flags;
			lockfs32.lf_key = (uint32_t)lockfs.lf_key;
			lockfs32.lf_comlen = (uint32_t)lockfs.lf_comlen;
			lockfs32.lf_comment =
			    (uint32_t)(uintptr_t)lockfs.lf_comment;
			if (copyout(&lockfs32, (caddr_t)arg,
			    sizeof (struct lockfs32)))
				return (EFAULT);
		}
#endif /* _SYSCALL32_IMPL */

		if (lockfs.lf_comlen &&
		    lockfs.lf_comment && lockfs_out.lf_comment)
			if (copyout(lockfs_out.lf_comment,
			    lockfs.lf_comment, lockfs.lf_comlen))
				return (EFAULT);
		return (0);

	case _FIOSATIME:
		/*
		 * if mounted w/o atime, return quietly.
		 * I briefly thought about returning ENOSYS, but
		 * figured that most apps would consider this fatal
		 * but the idea is to make this as seamless as poss.
		 */
		if (ufsvfsp->vfs_noatime)
			return (0);

		error = ufs_lockfs_begin(ufsvfsp, &ulp,
		    ULOCKFS_SETATTR_MASK);
		if (error)
			return (error);

		if (ulp) {
			trans_size = (int)TOP_SETATTR_SIZE(VTOI(vp));
			TRANS_BEGIN_CSYNC(ufsvfsp, &issync,
			    TOP_SETATTR, trans_size);
		}

		error = ufs_fiosatime(vp, (struct timeval *)arg,
		    flag, cr);

		if (ulp) {
			TRANS_END_CSYNC(ufsvfsp, &error, issync,
			    TOP_SETATTR, trans_size);
			ufs_lockfs_end(ulp);
		}
		return (error);

	case _FIOSDIO:
		return (ufs_fiosdio(vp, (uint_t *)arg, flag, cr));

	case _FIOGDIO:
		return (ufs_fiogdio(vp, (uint_t *)arg, flag, cr));

	case _FIOIO:
		error = ufs_lockfs_begin(ufsvfsp, &ulp,
		    ULOCKFS_GETATTR_MASK);
		if (error)
			return (error);

		error = ufs_fioio(vp, (struct fioio *)arg, flag, cr);

		if (ulp)
			ufs_lockfs_end(ulp);
		return (error);

	case _FIOFFS:
		/*
		 * file system flush (push w/invalidate)
		 */
		if ((caddr_t)arg != NULL)
			return (EIO);
		return (ufs_fioffs(vp, NULL, cr));

	case _FIOISBUSY:
		/*
		 * Contract-private interface for Legato
		 * Purge this vnode from the DNLC and decide
		 * if this vnode is busy (*arg == 1) or not
		 * (*arg == 0)
		 */
		if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
			return (EPERM);
		error = ufs_fioisbusy(vp, (int *)arg, cr);
		return (error);

	case _FIODIRECTIO:
		return (ufs_fiodirectio(vp, (int)arg, cr));

	case _FIOTUNE:
		/*
		 * Tune the file system (aka setting fs attributes)
		 */
		error = ufs_lockfs_begin(ufsvfsp, &ulp,
		    ULOCKFS_SETATTR_MASK);
		if (error)
			return (error);

		error = ufs_fiotune(vp, (struct fiotune *)arg, cr);

		if (ulp)
			ufs_lockfs_end(ulp);
		return (error);

	case _FIOLOGENABLE:
		if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
			return (EPERM);
		return (ufs_fiologenable(vp, (void *)arg, cr, flag));

	case _FIOLOGDISABLE:
		if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
			return (EPERM);
		return (ufs_fiologdisable(vp, (void *)arg, cr, flag));

	case _FIOISLOG:
		return (ufs_fioislog(vp, (void *)arg, cr, flag));

	case _FIOSNAPSHOTCREATE_MULTI:
	{
		struct fiosnapcreate_multi fc, *fcp;
		size_t fcm_size;

		if (copyin((void *)arg, &fc, sizeof (fc)))
			return (EFAULT);
		if (fc.backfilecount > MAX_BACKFILE_COUNT)
			return (EINVAL);
		fcm_size = sizeof (struct fiosnapcreate_multi) +
		    (fc.backfilecount - 1) * sizeof (int);
		fcp = (struct fiosnapcreate_multi *)
		    kmem_alloc(fcm_size, KM_SLEEP);
		if (copyin((void *)arg, fcp, fcm_size)) {
			kmem_free(fcp, fcm_size);
			return (EFAULT);
		}
		error = ufs_snap_create(vp, fcp, cr);
		/*
		 * Do copyout even if there is an error because
		 * the details of the error are stored in fcp.
		 */
		if (copyout(fcp, (void *)arg, fcm_size))
			error = EFAULT;
		kmem_free(fcp, fcm_size);
		return (error);
	}

	case _FIOSNAPSHOTDELETE:
	{
		struct fiosnapdelete fc;

		if (copyin((void *)arg, &fc, sizeof (fc)))
			return (EFAULT);
		error = ufs_snap_delete(vp, &fc, cr);
		if (!error && copyout(&fc, (void *)arg, sizeof (fc)))
			error = EFAULT;
		return (error);
	}

	case _FIOGETSUPERBLOCK:
		if (copyout(fs, (void *)arg, SBSIZE))
			return (EFAULT);
		return (0);

	case _FIOGETMAXPHYS:
		if (copyout(&maxphys, (void *)arg, sizeof (maxphys)))
			return (EFAULT);
		return (0);

	/*
	 * The following 3 ioctls are for TSufs support,
	 * although they could potentially be used elsewhere.
	 */
	case _FIO_SET_LUFS_DEBUG:
		if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
			return (EPERM);
		lufs_debug = (uint32_t)arg;
		return (0);

	case _FIO_SET_LUFS_ERROR:
		if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
			return (EPERM);
		TRANS_SETERROR(ufsvfsp);
		return (0);

	case _FIO_GET_TOP_STATS:
	{
		fio_lufs_stats_t *ls;
		ml_unit_t *ul = ufsvfsp->vfs_log;

		ls = kmem_zalloc(sizeof (*ls), KM_SLEEP);
		ls->ls_debug = ul->un_debug;	/* return debug value */
		/* Copy structure if statistics are being kept */
		if (ul->un_logmap->mtm_tops) {
			ls->ls_topstats = *(ul->un_logmap->mtm_tops);
		}
		error = 0;
		if (copyout(ls, (void *)arg, sizeof (*ls)))
			error = EFAULT;
		kmem_free(ls, sizeof (*ls));
		return (error);
	}

	case _FIO_SEEK_DATA:
	case _FIO_SEEK_HOLE:
		if (ddi_copyin((void *)arg, &off, sizeof (off), flag))
			return (EFAULT);
		/* offset parameter is in/out */
		error = ufs_fio_holey(vp, cmd, &off);
		if (error)
			return (error);
		if (ddi_copyout(&off, (void *)arg, sizeof (off), flag))
			return (EFAULT);
		return (0);

	case _FIO_COMPRESSED:
	{
		/*
		 * This is a project private ufs ioctl() to mark
		 * the inode as that belonging to a compressed
		 * file. This is used to mark individual
		 * compressed files in a miniroot archive.
		 * The files compressed in this manner are
		 * automatically decompressed by the dcfs filesystem
		 * (via an interception in ufs_lookup - see decompvp())
		 * which is layered on top of ufs on a system running
		 * from the archive. See uts/common/fs/dcfs for details.
		 * This ioctl only marks the file as compressed - the
		 * actual compression is done by fiocompress (a
		 * userland utility) which invokes this ioctl().
		 */
		struct inode *ip = VTOI(vp);

		error = ufs_lockfs_begin(ufsvfsp, &ulp,
		    ULOCKFS_SETATTR_MASK);
		if (error)
			return (error);

		if (ulp) {
			TRANS_BEGIN_ASYNC(ufsvfsp, TOP_IUPDAT,
			    TOP_IUPDAT_SIZE(ip));
		}

		error = ufs_mark_compressed(vp);

		if (ulp) {
			TRANS_END_ASYNC(ufsvfsp, TOP_IUPDAT,
			    TOP_IUPDAT_SIZE(ip));
			ufs_lockfs_end(ulp);
		}

		return (error);
	}

	default:
		return (ENOTTY);
	}
}
/* ARGSUSED */
static int
ufs_getattr(struct vnode *vp, struct vattr *vap, int flags,
	struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;

	if (vap->va_mask == VATTR_SIZE) {
		/*
		 * for performance, if only the size is requested don't bother
		 * with anything else.
		 */
		UFS_GET_ISIZE(&vap->va_size, ip);
		return (0);
	}

	/*
	 * inlined lockfs checks
	 */
	ufsvfsp = ip->i_ufsvfs;
	if ((ufsvfsp == NULL) || ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs)) {
		return (EIO);
	}

	rw_enter(&ip->i_contents, RW_READER);
	/*
	 * Return all the attributes.  This should be refined so
	 * that it only returns what's asked for.
	 */

	/*
	 * Copy from inode table.
	 */
	vap->va_type = vp->v_type;
	vap->va_mode = ip->i_mode & MODEMASK;
	/*
	 * If there is an ACL and there is a mask entry, then do the
	 * extra work that completes the equivalent of an acltomode(3)
	 * call.  According to POSIX P1003.1e, the acl mask should be
	 * returned in the group permissions field.
	 *
	 * - start with the original permission and mode bits (from above)
	 * - clear the group owner bits
	 * - add in the mask bits.
	 */
	if (ip->i_ufs_acl && ip->i_ufs_acl->aclass.acl_ismask) {
		vap->va_mode &= ~((VREAD | VWRITE | VEXEC) >> 3);
		vap->va_mode |=
		    (ip->i_ufs_acl->aclass.acl_maskbits & PERMMASK) << 3;
	}
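
	/*
	 * Worked example (illustrative): a file whose base mode is 0664
	 * but whose ACL mask is r-- is reported as 0644 here: the group
	 * bits are cleared (0664 & ~0070 = 0604) and the mask bits are
	 * or'ed back in (0604 | 0040 = 0644).
	 */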
	vap->va_uid = ip->i_uid;
	vap->va_gid = ip->i_gid;
	vap->va_fsid = ip->i_dev;
	vap->va_nodeid = (ino64_t)ip->i_number;
	vap->va_nlink = ip->i_nlink;
	vap->va_size = ip->i_size;
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		vap->va_rdev = ip->i_rdev;
	else
		vap->va_rdev = 0;	/* not a b/c spec. */
	mutex_enter(&ip->i_tlock);
	ITIMES_NOLOCK(ip);	/* mark correct time in inode */
	vap->va_seq = ip->i_seq;
	vap->va_atime.tv_sec = (time_t)ip->i_atime.tv_sec;
	vap->va_atime.tv_nsec = ip->i_atime.tv_usec * 1000;
	vap->va_mtime.tv_sec = (time_t)ip->i_mtime.tv_sec;
	vap->va_mtime.tv_nsec = ip->i_mtime.tv_usec * 1000;
	vap->va_ctime.tv_sec = (time_t)ip->i_ctime.tv_sec;
	vap->va_ctime.tv_nsec = ip->i_ctime.tv_usec * 1000;
	mutex_exit(&ip->i_tlock);

	switch (ip->i_mode & IFMT) {

	case IFBLK:
		vap->va_blksize = MAXBSIZE;	/* was BLKDEV_IOSIZE */
		break;

	case IFCHR:
		vap->va_blksize = MAXBSIZE;
		break;

	default:
		vap->va_blksize = ip->i_fs->fs_bsize;
		break;
	}
	vap->va_nblocks = (fsblkcnt64_t)ip->i_blocks;
	rw_exit(&ip->i_contents);

	return (0);
}
/*
 * Special wrapper to provide a callback for secpolicy_vnode_setattr().
 * The i_contents lock is already held by the caller and we need to
 * declare the inode as 'void *' argument.
 */
static int
ufs_priv_access(void *vip, int mode, struct cred *cr)
{
	struct inode *ip = vip;

	return (ufs_iaccess(ip, mode, cr, 0));
}
2101 ufs_setattr(struct vnode
*vp
, struct vattr
*vap
, int flags
, struct cred
*cr
,
2102 caller_context_t
*ct
)
2104 struct inode
*ip
= VTOI(vp
);
2105 struct ufsvfs
*ufsvfsp
= ip
->i_ufsvfs
;
2107 struct ulockfs
*ulp
;
2111 long int mask
= vap
->va_mask
;
2126 * Cannot set these attributes.
2128 if ((mask
& VATTR_NOSET
) || (mask
& VATTR_XVATTR
))
2132 * check for forced unmount
2134 if (ufsvfsp
== NULL
)
2137 fs
= ufsvfsp
->vfs_fs
;
2138 if (fs
->fs_ronly
!= 0)
2148 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_SETATTR_MASK
);
2153 * Acquire i_rwlock before TRANS_BEGIN_CSYNC() if this is a file.
2154 * This follows the protocol for read()/write().
2156 if (vp
->v_type
!= VDIR
) {
2158 * ufs_tryirwlock uses rw_tryenter and checks for SLOCK to
2159 * avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
2160 * possible, retries the operation.
2162 indeadlock
= ufs_tryirwlock(ulp
, &ip
->i_rwlock
, RW_WRITER
);
2165 ufs_lockfs_end(ulp
);
2172 * Truncate file. Must have write permission and not be a directory.
2174 if (mask
& VATTR_SIZE
) {
2175 rw_enter(&ip
->i_contents
, RW_WRITER
);
2176 if (vp
->v_type
== VDIR
) {
2180 if (error
= ufs_iaccess(ip
, IWRITE
, cr
, 0))
2183 rw_exit(&ip
->i_contents
);
2184 error
= TRANS_ITRUNC(ip
, vap
->va_size
, 0, cr
);
2186 rw_enter(&ip
->i_contents
, RW_WRITER
);
2190 if (error
== 0 && vap
->va_size
)
2191 vnevent_truncate(vp
	    , ct);
	}

	if (ulp) {
		trans_size = (int)TOP_SETATTR_SIZE(ip);
		TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_SETATTR, trans_size);
	}

	/*
	 * Acquire i_rwlock after TRANS_BEGIN_CSYNC() if this is a directory.
	 * This follows the protocol established by
	 * ufs_link/create/remove/rename/mkdir/rmdir/symlink.
	 */
	if (vp->v_type == VDIR) {
		indeadlock = ufs_tryirwlock_trans(ulp, &ip->i_rwlock,
		    RW_WRITER, TOP_SETATTR,
		    ufsvfsp, &error, issync,
		    trans_size);
		if (indeadlock)
			goto again;
		dorwlock = 1;
	}

	/*
	 * Grab quota lock if we are changing the file's owner.
	 */
	if (mask & VATTR_UID) {
		rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
		dodqlock = 1;
	}
	rw_enter(&ip->i_contents, RW_WRITER);

	oldva.va_mode = ip->i_mode;
	oldva.va_uid = ip->i_uid;
	oldva.va_gid = ip->i_gid;

	vap->va_mask &= ~VATTR_SIZE;

	error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
	    ufs_priv_access, ip);
	if (error)
		goto update_inode;

	mask = vap->va_mask;

	/*
	 * Change file access modes.
	 */
	if (mask & VATTR_MODE) {
		ip->i_mode = (ip->i_mode & IFMT) | (vap->va_mode & ~IFMT);
		TRANS_INODE(ufsvfsp, ip);
		ip->i_flag |= ICHG;
		if (stickyhack) {
			mutex_enter(&vp->v_lock);
			if ((ip->i_mode & (ISVTX | IEXEC | IFDIR)) == ISVTX)
				vp->v_flag |= VSWAPLIKE;
			else
				vp->v_flag &= ~VSWAPLIKE;
			mutex_exit(&vp->v_lock);
		}
	}
	if (mask & (VATTR_UID|VATTR_GID)) {
		if (mask & VATTR_UID) {
			/*
			 * Don't change ownership of the quota inode.
			 */
			if (ufsvfsp->vfs_qinod == ip) {
				ASSERT(ufsvfsp->vfs_qflags & MQ_ENABLED);
				error = EINVAL;
				goto update_inode;
			}

			/*
			 * No real ownership change.
			 */
			if (ip->i_uid == vap->va_uid) {
				blocks = 0;
				owner_change = 0;
			}
			/*
			 * Remove the blocks and the file, from the old user's
			 * quota.
			 */
			else {
				blocks = ip->i_blocks;
				owner_change = 1;

				(void) chkdq(ip, -blocks, /* force */ 1, cr,
				    (char **)NULL, NULL);
				(void) chkiq(ufsvfsp, /* change */ -1, ip,
				    (uid_t)ip->i_uid, /* force */ 1, cr,
				    (char **)NULL, NULL);
				dqrele(ip->i_dquot);
			}

			ip->i_uid = vap->va_uid;

			/*
			 * There is a real ownership change.
			 */
			if (owner_change) {
				/*
				 * Add the blocks and the file to the new
				 * user's quota.
				 */
				ip->i_dquot = getinoquota(ip);
				(void) chkdq(ip, blocks, /* force */ 1, cr,
				    &errmsg1, &len1);
				(void) chkiq(ufsvfsp, /* change */ 1,
				    NULL, (uid_t)ip->i_uid,
				    /* force */ 1, cr, &errmsg2, &len2);
			}
		}
		if (mask & VATTR_GID) {
			ip->i_gid = vap->va_gid;
		}
		TRANS_INODE(ufsvfsp, ip);
		ip->i_flag |= ICHG;
	}
	/*
	 * Change file access or modified times.
	 */
	if (mask & (VATTR_ATIME|VATTR_MTIME)) {
		/* Check that the time value is within ufs range */
		if (((mask & VATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
		    ((mask & VATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
			error = EOVERFLOW;
			goto update_inode;
		}

		/*
		 * if the "noaccess" mount option is set and only atime
		 * update is requested, do nothing. No error is returned.
		 */
		if ((ufsvfsp->vfs_noatime) &&
		    ((mask & (VATTR_ATIME|VATTR_MTIME)) == VATTR_ATIME))
			goto skip_atime;

		if (mask & VATTR_ATIME) {
			ip->i_atime.tv_sec = vap->va_atime.tv_sec;
			ip->i_atime.tv_usec = vap->va_atime.tv_nsec / 1000;
			ip->i_flag &= ~IACC;
		}
		if (mask & VATTR_MTIME) {
			ip->i_mtime.tv_sec = vap->va_mtime.tv_sec;
			ip->i_mtime.tv_usec = vap->va_mtime.tv_nsec / 1000;
			gethrestime(&now);
			if (now.tv_sec > TIME32_MAX) {
				/*
				 * In 2038, ctime sticks forever..
				 */
				ip->i_ctime.tv_sec = TIME32_MAX;
				ip->i_ctime.tv_usec = 0;
			} else {
				ip->i_ctime.tv_sec = now.tv_sec;
				ip->i_ctime.tv_usec = now.tv_nsec / 1000;
			}
			ip->i_flag &= ~(IUPD|ICHG);
			ip->i_flag |= IMODTIME;
		}
		TRANS_INODE(ufsvfsp, ip);
		ip->i_flag |= IMOD;
	}

skip_atime:
	/*
	 * The presence of a shadow inode may indicate an ACL, but does
	 * not imply an ACL.  Future FSD types should be handled here too
	 * and check for the presence of the attribute-specific data
	 * before referencing it.
	 */
	if (ip->i_shadow) {
		/*
		 * XXX if ufs_iupdat is changed to sandbagged write fix
		 * ufs_acl_setattr to push ip to keep acls consistent
		 *
		 * Suppress out of inodes messages if we will retry.
		 */
		if (retry)
			ip->i_flag |= IQUIET;
		error = ufs_acl_setattr(ip, vap, cr);
		ip->i_flag &= ~IQUIET;
	}

update_inode:
	/*
	 * Setattr always increases the sequence number
	 */
	ip->i_seq++;

	/*
	 * if nfsd and not logging; push synchronously
	 */
	if ((curthread->t_flag & T_DONTPEND) && !TRANS_ISTRANS(ufsvfsp)) {
		ufs_iupdat(ip, 1);
	} else {
		ITIMES_NOLOCK(ip);
	}

	rw_exit(&ip->i_contents);
	if (dodqlock)
		rw_exit(&ufsvfsp->vfs_dqrwlock);
	if (dorwlock)
		rw_exit(&ip->i_rwlock);

	if (ulp) {
		TRANS_END_CSYNC(ufsvfsp, &terr, issync, TOP_SETATTR,
		    trans_size);
		if (error == 0)
			error = terr;
		ufs_lockfs_end(ulp);
	}

out:
	/*
	 * If out of inodes or blocks, see if we can free something
	 * up from the delete queue.
	 */
	if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		if (errmsg1 != NULL)
			kmem_free(errmsg1, len1);
		if (errmsg2 != NULL)
			kmem_free(errmsg2, len2);
		goto again;
	}
	if (errmsg1 != NULL) {
		uprintf(errmsg1);
		kmem_free(errmsg1, len1);
	}
	if (errmsg2 != NULL) {
		uprintf(errmsg2);
		kmem_free(errmsg2, len2);
	}
	return (error);
}
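
/*
 * Illustrative sketch (not from the original source): the lock-ordering
 * protocol referenced in ufs_setattr above gives every logged directory
 * update the same shape.  Assuming a logging (TRANS_ISTRANS) filesystem,
 * a minimal operation looks like:
 *
 *	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK);
 *	TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_SETATTR, trans_size);
 *	indeadlock = ufs_tryirwlock_trans(ulp, &ip->i_rwlock, RW_WRITER,
 *	    TOP_SETATTR, ufsvfsp, &error, issync, trans_size);
 *	...modify the inode...
 *	rw_exit(&ip->i_rwlock);
 *	TRANS_END_CSYNC(ufsvfsp, &terr, issync, TOP_SETATTR, trans_size);
 *	ufs_lockfs_end(ulp);
 *
 * i_rwlock is taken *inside* the transaction, and with rw_tryenter via
 * ufs_tryirwlock_trans, so a quiescing thread that holds SLOCK cannot
 * deadlock against a thread already holding i_rwlock.
 */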
/* ARGSUSED */
static int
ufs_access(struct vnode *vp, int mode, int flags, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (ip->i_ufsvfs == NULL)
		return (EIO);

	/*
	 * The ufs_iaccess function wants to be called with
	 * mode bits expressed as "ufs specific" bits.
	 * I.e., VWRITE|VREAD|VEXEC do not make sense to
	 * ufs_iaccess() but IWRITE|IREAD|IEXEC do.
	 * But since they're the same we just pass the vnode mode
	 * bit but just verify that assumption at compile time.
	 */
#if IWRITE != VWRITE || IREAD != VREAD || IEXEC != VEXEC
#error "ufs_access needs to map Vmodes to Imodes"
#endif
	return (ufs_iaccess(ip, mode, cr, 1));
}
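
/*
 * Illustrative note (not from the original source): the #if/#error above is
 * a compile-time assertion.  An equivalent check, assuming only standard C
 * plus the negative-array-size diagnostic, would be:
 *
 *	typedef char ufs_vmode_eq_imode
 *	    [(IWRITE == VWRITE && IREAD == VREAD && IEXEC == VEXEC) ? 1 : -1];
 *
 * Either form costs nothing at run time; it simply refuses to compile if
 * the V* and I* permission bits ever diverge.
 */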
/* ARGSUSED */
static int
ufs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp;
	int error;
	int fastsymlink;

	if (vp->v_type != VLNK) {
		error = EINVAL;
		goto nolockout;
	}

	/*
	 * If the symbolic link is empty there is nothing to read.
	 * Fast-track these empty symbolic links
	 */
	if (ip->i_size == 0) {
		error = 0;
		goto nolockout;
	}

	ufsvfsp = ip->i_ufsvfs;
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READLINK_MASK);
	if (error)
		goto nolockout;
	/*
	 * The ip->i_rwlock protects the data blocks used for FASTSYMLINK
	 */
again:
	fastsymlink = 0;
	if (ip->i_flag & IFASTSYMLNK) {
		rw_enter(&ip->i_rwlock, RW_READER);
		rw_enter(&ip->i_contents, RW_READER);
		if (ip->i_flag & IFASTSYMLNK) {
			if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
			    (ip->i_fs->fs_ronly == 0) &&
			    (!ufsvfsp->vfs_noatime)) {
				mutex_enter(&ip->i_tlock);
				ip->i_flag |= IACC;
				mutex_exit(&ip->i_tlock);
			}
			error = uiomove((caddr_t)&ip->i_db[1],
			    MIN(ip->i_size, uiop->uio_resid),
			    UIO_READ, uiop);
			ITIMES(ip);
			++ip->i_seq;
			fastsymlink = 1;
		}
		rw_exit(&ip->i_contents);
		rw_exit(&ip->i_rwlock);
	}
	if (!fastsymlink) {
		ssize_t size;	/* number of bytes read  */
		caddr_t basep;	/* pointer to input data */
		ino_t ino;
		long  igen;
		struct uio tuio;	/* temp uio struct */
		struct uio *tuiop;
		iovec_t tiov;		/* temp iovec struct */
		char kbuf[FSL_SIZE];	/* buffer to hold fast symlink */
		int tflag = 0;		/* flag to indicate temp vars used */

		ino = ip->i_number;
		igen = ip->i_gen;
		size = uiop->uio_resid;
		basep = uiop->uio_iov->iov_base;
		tuiop = uiop;

		rw_enter(&ip->i_rwlock, RW_WRITER);
		rw_enter(&ip->i_contents, RW_WRITER);
		if (ip->i_flag & IFASTSYMLNK) {
			rw_exit(&ip->i_contents);
			rw_exit(&ip->i_rwlock);
			goto again;
		}

		/* can this be a fast symlink and is it a user buffer? */
		if (ip->i_size <= FSL_SIZE &&
		    (uiop->uio_segflg == UIO_USERSPACE ||
		    uiop->uio_segflg == UIO_USERISPACE)) {

			bzero(&tuio, sizeof (struct uio));
			/*
			 * setup a kernel buffer to read link into.  this
			 * is to fix a race condition where the user buffer
			 * got corrupted before copying it into the inode.
			 */
			size = ip->i_size;
			tiov.iov_len = size;
			tiov.iov_base = kbuf;
			tuio.uio_iov = &tiov;
			tuio.uio_iovcnt = 1;
			tuio.uio_offset = uiop->uio_offset;
			tuio.uio_segflg = UIO_SYSSPACE;
			tuio.uio_fmode = uiop->uio_fmode;
			tuio.uio_extflg = uiop->uio_extflg;
			tuio.uio_limit = uiop->uio_limit;
			tuio.uio_resid = size;

			basep = tuio.uio_iov->iov_base;
			tuiop = &tuio;
			tflag = 1;
		}

		error = rdip(ip, tuiop, 0, cr);
		if (!(error == 0 && ip->i_number == ino && ip->i_gen == igen)) {
			rw_exit(&ip->i_contents);
			rw_exit(&ip->i_rwlock);
			goto out;
		}

		if (tflag == 0)
			size -= uiop->uio_resid;

		if ((tflag == 0 && ip->i_size <= FSL_SIZE &&
		    ip->i_size == size) || (tflag == 1 &&
		    tuio.uio_resid == 0)) {
			error = kcopy(basep, &ip->i_db[1], ip->i_size);
			if (error == 0) {
				ip->i_flag |= IFASTSYMLNK;
				/*
				 * free page
				 */
				(void) fop_putpage(ITOV(ip),
				    (offset_t)0, PAGESIZE,
				    (B_DONTNEED | B_FREE | B_FORCE | B_ASYNC),
				    cr, ct);
			} else {
				int i;
				/* error, clear garbage left behind */
				for (i = 1; i < NDADDR; i++)
					ip->i_db[i] = 0;
				for (i = 0; i < NIADDR; i++)
					ip->i_ib[i] = 0;
			}
		}
		if (tflag == 1) {
			/* now, copy it into the user buffer */
			error = uiomove((caddr_t)kbuf,
			    MIN(size, uiop->uio_resid),
			    UIO_READ, uiop);
		}
		rw_exit(&ip->i_contents);
		rw_exit(&ip->i_rwlock);
	}
out:
	if (ulp) {
		ufs_lockfs_end(ulp);
	}
nolockout:
	return (error);
}
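
/*
 * Illustrative sketch (not from the original source): a "fast" symbolic
 * link keeps its target bytes directly in the on-disk inode, starting at
 * i_db[1].  i_db[0] is skipped because it addresses the slow, block-based
 * representation.  Roughly:
 *
 *	i_db[0]			block pointer, unused while the link is fast
 *	i_db[1] .. i_ib[]	up to FSL_SIZE bytes of target text
 *
 * which is why ufs_readlink above can satisfy a hit with one uiomove()
 * from &ip->i_db[1] and no disk I/O, and why the error path clears both
 * the i_db and i_ib arrays.
 */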
/*ARGSUSED*/
static int
ufs_fsync(struct vnode *vp, int syncflag, struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct ulockfs *ulp;
	int error;

	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_FSYNC_MASK);
	if (error)
		return (error);

	if (TRANS_ISTRANS(ufsvfsp)) {
		/*
		 * First push out any data pages
		 */
		if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
		    (vp->v_type != VCHR) && !(IS_SWAPVP(vp))) {
			error = fop_putpage(vp, 0, (size_t)0,
			    0, CRED(), ct);
			if (error)
				goto out;
		}

		/*
		 * Delta any delayed inode times updates
		 * and push inode to log.
		 * All other inode deltas will have already been delta'd
		 * and will be pushed during the commit.
		 */
		if (!(syncflag & FDSYNC) &&
		    ((ip->i_flag & (IMOD|IMODACC)) == IMODACC)) {
			if (ulp) {
				TRANS_BEGIN_ASYNC(ufsvfsp, TOP_FSYNC,
				    TOP_SYNCIP_SIZE);
			}
			rw_enter(&ip->i_contents, RW_READER);
			mutex_enter(&ip->i_tlock);
			ip->i_flag &= ~IMODTIME;
			mutex_exit(&ip->i_tlock);
			ufs_iupdat(ip, I_SYNC);
			rw_exit(&ip->i_contents);
			if (ulp) {
				TRANS_END_ASYNC(ufsvfsp, TOP_FSYNC,
				    TOP_SYNCIP_SIZE);
			}
		}

		/*
		 * Commit the Moby transaction
		 *
		 * Deltas have already been made so we just need to
		 * commit them with a synchronous transaction.
		 * TRANS_BEGIN_SYNC() will return an error
		 * if there are no deltas to commit, for an
		 * empty transaction.
		 */
		if (ulp) {
			TRANS_BEGIN_SYNC(ufsvfsp, TOP_FSYNC, TOP_COMMIT_SIZE,
			    error);
			if (error) {
				error = 0; /* commit wasn't needed */
				goto out;
			}
			TRANS_END_SYNC(ufsvfsp, &error, TOP_FSYNC,
			    TOP_COMMIT_SIZE);
		}
	} else {	/* not logging */
		if (!(IS_SWAPVP(vp)))
			if (syncflag & FNODSYNC) {
				/* Just update the inode only */
				TRANS_IUPDAT(ip, 1);
				error = 0;
			} else if (syncflag & FDSYNC)
				/* Do data-synchronous writes */
				error = TRANS_SYNCIP(ip, 0, I_DSYNC, TOP_FSYNC);
			else
				/* Do synchronous writes */
				error = TRANS_SYNCIP(ip, 0, I_SYNC, TOP_FSYNC);

		rw_enter(&ip->i_contents, RW_WRITER);
		if (!error)
			error = ufs_sync_indir(ip);
		rw_exit(&ip->i_contents);
	}
out:
	if (ulp) {
		ufs_lockfs_end(ulp);
	}
	return (error);
}
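
/*
 * Illustrative note (not from the original source): ufs_fsync above has two
 * very different costs.  On a logging mount it usually reduces to committing
 * deltas that were already recorded (TRANS_BEGIN_SYNC/TRANS_END_SYNC), so an
 * fsync-heavy workload mostly pays for one log commit.  On a non-logging
 * mount TRANS_SYNCIP() must push the inode and, for I_SYNC, its dirty pages
 * and indirect blocks (ufs_sync_indir) individually.
 */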
/*ARGSUSED*/
static void
ufs_inactive(struct vnode *vp, struct cred *cr, caller_context_t *ct)
{
	ufs_iinactive(VTOI(vp));
}

/*
 * Unix file system operations having to do with directory manipulation.
 */
int ufs_lookup_idle_count = 2;	/* Number of inodes to idle each time */
/* ARGSUSED */
static int
ufs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
    struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cr,
    caller_context_t *ct, int *direntflags, pathname_t *realpnp)
{
	struct inode *ip;
	struct inode *sip;
	struct inode *xip;
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp;
	struct vnode *vp;
	int error;

	/*
	 * Check flags for type of lookup (regular file or attribute file)
	 */

	ip = VTOI(dvp);

	if (flags & LOOKUP_XATTR) {
		/*
		 * If not mounted with XATTR support then return EINVAL
		 */
		if (!(ip->i_ufsvfs->vfs_vfs->vfs_flag & VFS_XATTR))
			return (EINVAL);

		/*
		 * We don't allow recursive attributes...
		 * Maybe someday we will.
		 */
		if ((ip->i_cflags & IXATTR)) {
			return (EINVAL);
		}

		if ((vp = dnlc_lookup(dvp, XATTR_DIR_NAME)) == NULL) {
			error = ufs_xattr_getattrdir(dvp, &sip, flags, cr);
			if (error) {
				*vpp = NULL;
				goto out;
			}

			vp = ITOV(sip);
			dnlc_update(dvp, XATTR_DIR_NAME, vp);
		}

		/*
		 * Check accessibility of directory.
		 */
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			error = ENOENT;
			goto out;
		}
		if ((error = ufs_iaccess(VTOI(vp), IEXEC, cr, 1)) != 0) {
			VN_RELE(vp);
			goto out;
		}

		*vpp = vp;
		return (0);
	}

	/*
	 * Check for a null component, which we should treat as
	 * looking at dvp from within it's parent, so we don't
	 * need a call to ufs_iaccess(), as it has already been
	 * done.
	 */
	if (nm[0] == 0) {
		VN_HOLD(dvp);
		error = 0;
		*vpp = dvp;
		goto out;
	}

	/*
	 * Check for "." ie itself. this is a quick check and
	 * avoids adding "." into the dnlc (which have been seen
	 * to occupy >10% of the cache).
	 */
	if ((nm[0] == '.') && (nm[1] == 0)) {
		/*
		 * Don't return without checking accessibility
		 * of the directory. We only need the lock if
		 * we are going to return it.
		 */
		if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) == 0) {
			VN_HOLD(dvp);
			*vpp = dvp;
		}
		goto out;
	}

	/*
	 * Fast path: Check the directory name lookup cache.
	 */
	if (vp = dnlc_lookup(dvp, nm)) {
		/*
		 * Check accessibility of directory.
		 */
		if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) != 0) {
			VN_RELE(vp);
			goto out;
		}
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			error = ENOENT;
			goto out;
		}
		xip = VTOI(vp);
		ulp = NULL;
		goto fastpath;
	}

	/*
	 * Keep the idle queue from getting too long by
	 * idling two inodes before attempting to allocate another.
	 * This operation must be performed before entering
	 * lockfs or a transaction.
	 */
	if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
		if ((curthread->t_flag & T_DONTBLOCK) == 0) {
			ins.in_lidles.value.ul += ufs_lookup_idle_count;
			ufs_idle_some(ufs_lookup_idle_count);
		}

retry_lookup:
	/*
	 * Check accessibility of directory.
	 */
	if (error = ufs_diraccess(ip, IEXEC, cr))
		goto out;

	ufsvfsp = ip->i_ufsvfs;
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK);
	if (error)
		goto out;

	error = ufs_dirlook(ip, nm, &xip, cr, 1, 0);

fastpath:
	if (error == 0) {
		ip = xip;
		*vpp = ITOV(ip);

		/*
		 * If vnode is a device return special vnode instead.
		 */
		if (IS_DEVVP(*vpp)) {
			struct vnode *newvp;

			newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
			    cr);
			VN_RELE(*vpp);
			if (newvp == NULL)
				error = ENOSYS;
			else
				*vpp = newvp;
		} else if (ip->i_cflags & ICOMPRESS) {
			struct vnode *newvp;

			/*
			 * Compressed file, substitute dcfs vnode
			 */
			newvp = decompvp(*vpp, cr, ct);
			VN_RELE(*vpp);
			if (newvp == NULL)
				error = ENOSYS;
			else
				*vpp = newvp;
		}
	}
	if (ulp) {
		ufs_lockfs_end(ulp);
	}

	if (error == EAGAIN)
		goto retry_lookup;

out:
	return (error);
}
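
/*
 * Illustrative note (not from the original source): dnlc_lookup() can return
 * the sentinel DNLC_NO_VNODE, a cached negative entry recording that the
 * name is known not to exist.  As in ufs_lookup above, a consumer must still
 * VN_RELE() the sentinel:
 *
 *	if ((vp = dnlc_lookup(dvp, nm)) != NULL) {
 *		if (vp == DNLC_NO_VNODE) {
 *			VN_RELE(vp);
 *			return (ENOENT);
 *		}
 *		...handle the positive hit...
 *	}
 *
 * Caching absence this way spares a directory scan on repeated lookups of
 * missing names, at the cost of one extra sentinel check per hit.
 */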
/*ARGSUSED*/
static int
ufs_create(struct vnode *dvp, char *name, struct vattr *vap, enum vcexcl excl,
    int mode, struct vnode **vpp, struct cred *cr, int flag,
    caller_context_t *ct, vsecattr_t *vsecp)
{
	struct inode *ip;
	struct inode *xip;
	struct inode *dip;
	struct vnode *xvp;
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp;
	int error;
	int issync;
	int truncflag;
	int trans_size;
	int noentry;
	int defer_dip_seq_update = 0;	/* need to defer update of dip->i_seq */
	int retry = 1;
	int indeadlock;

again:
	ip = VTOI(dvp);
	*vpp = NULL;
	truncflag = 0;

	ufsvfsp = ip->i_ufsvfs;
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_CREATE_MASK);
	if (error)
		goto out;

	if (ulp) {
		trans_size = (int)TOP_CREATE_SIZE(ip);
		TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_CREATE, trans_size);
	}

	if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr) != 0)
		vap->va_mode &= ~VSVTX;

	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		VN_HOLD(dvp);
		ip = VTOI(dvp);
		error = EEXIST;
		/*
		 * Even though this is an error case, we need to grab the
		 * quota lock since the error handling code below is common.
		 */
		rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
		rw_enter(&ip->i_contents, RW_WRITER);
	} else {
		xip = NULL;
		noentry = 0;
		/*
		 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
		 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
		 * possible, retries the operation.
		 */
		indeadlock = ufs_tryirwlock_trans(ulp, &ip->i_rwlock,
		    RW_WRITER, TOP_CREATE,
		    ufsvfsp, &error, issync,
		    trans_size);
		if (indeadlock)
			goto again;

		xvp = dnlc_lookup(dvp, name);
		if (xvp == DNLC_NO_VNODE) {
			noentry = 1;
			VN_RELE(xvp);
			xvp = NULL;
		}
		if (xvp) {
			rw_exit(&ip->i_rwlock);
			if (error = ufs_iaccess(ip, IEXEC, cr, 1)) {
				VN_RELE(xvp);
			} else {
				error = EEXIST;
				xip = VTOI(xvp);
			}
		} else {
			/*
			 * Suppress file system full message if we will retry
			 */
			error = ufs_direnter_cm(ip, name, DE_CREATE,
			    vap, &xip, cr, (noentry | (retry ? IQUIET : 0)));
			if (error == EAGAIN) {
				if (ulp) {
					TRANS_END_CSYNC(ufsvfsp, &error,
					    issync, TOP_CREATE, trans_size);
					ufs_lockfs_end(ulp);
				}
				goto again;
			}
			rw_exit(&ip->i_rwlock);
		}
		ip = xip;
		if (ip != NULL) {
			rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
			rw_enter(&ip->i_contents, RW_WRITER);
		}
	}

	/*
	 * If the file already exists and this is a non-exclusive create,
	 * check permissions and allow access for non-directories.
	 * Read-only create of an existing directory is also allowed.
	 * We fail an exclusive create of anything which already exists.
	 */
	if (error == EEXIST) {
		dip = VTOI(dvp);
		if (excl == NONEXCL) {
			if ((((ip->i_mode & IFMT) == IFDIR) ||
			    ((ip->i_mode & IFMT) == IFATTRDIR)) &&
			    (mode & IWRITE))
				error = EISDIR;
			else if (mode)
				error = ufs_iaccess(ip, mode, cr, 0);
			else
				error = 0;
		}
		if (error) {
			rw_exit(&ip->i_contents);
			rw_exit(&ufsvfsp->vfs_dqrwlock);
			VN_RELE(ITOV(ip));
			goto unlock;
		}
		/*
		 * If the error EEXIST was set, then i_seq can not
		 * have been updated. The sequence number interface
		 * is defined such that a non-error fop_create must
		 * increase the dir va_seq it by at least one. If we
		 * have cleared the error, increase i_seq. Note that
		 * we are increasing the dir i_seq and in rare cases
		 * ip may actually be from the dvp, so we already have
		 * the locks and it will not be subject to truncation.
		 * In case we have to update i_seq of the parent
		 * directory dip, we have to defer it till we have
		 * released our locks on ip due to lock ordering requirements.
		 */
		if (ip != dip)
			defer_dip_seq_update = 1;
		else
			ip->i_seq++;

		if (((ip->i_mode & IFMT) == IFREG) &&
		    (vap->va_mask & VATTR_SIZE) && vap->va_size == 0) {
			/*
			 * Truncate regular files, if requested by caller.
			 * Grab i_rwlock to make sure no one else is
			 * currently writing to the file (we promised
			 * bmap we would do this).
			 * Must get the locks in the correct order.
			 */
			if (ip->i_size == 0) {
				ip->i_flag |= ICHG | IUPD;
				ip->i_seq++;
				TRANS_INODE(ufsvfsp, ip);
			} else {
				/*
				 * Large Files: Why this check here?
				 * Though we do it in vn_create() we really
				 * want to guarantee that we do not destroy
				 * Large file data by atomically checking
				 * the size while holding the contents
				 * lock.
				 */
				if (flag && !(flag & FOFFMAX) &&
				    ((ip->i_mode & IFMT) == IFREG) &&
				    (ip->i_size > (offset_t)MAXOFF32_T)) {
					rw_exit(&ip->i_contents);
					rw_exit(&ufsvfsp->vfs_dqrwlock);
					error = EOVERFLOW;
					goto unlock;
				}
				if (TRANS_ISTRANS(ufsvfsp))
					truncflag++;
				else {
					rw_exit(&ip->i_contents);
					rw_exit(&ufsvfsp->vfs_dqrwlock);
					indeadlock = ufs_tryirwlock_trans(ulp,
					    &ip->i_rwlock, RW_WRITER,
					    TOP_CREATE, ufsvfsp, &error,
					    issync, trans_size);
					if (indeadlock)
						goto again;
					rw_enter(&ufsvfsp->vfs_dqrwlock,
					    RW_READER);
					rw_enter(&ip->i_contents, RW_WRITER);
					(void) ufs_itrunc(ip, 0, 0,
					    cr);
					rw_exit(&ip->i_rwlock);
				}
			}
			if (error == 0) {
				vnevent_create(ITOV(ip), ct);
			}
		}
	}

	if (error) {
		if (ip != NULL) {
			rw_exit(&ufsvfsp->vfs_dqrwlock);
			rw_exit(&ip->i_contents);
		}
		goto unlock;
	}

	*vpp = ITOV(ip);
	ITIMES(ip);
	rw_exit(&ip->i_contents);
	rw_exit(&ufsvfsp->vfs_dqrwlock);

	/*
	 * If vnode is a device return special vnode instead.
	 */
	if (!error && IS_DEVVP(*vpp)) {
		struct vnode *newvp;

		newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
		VN_RELE(*vpp);
		if (newvp == NULL) {
			error = ENOSYS;
			goto unlock;
		}
		*vpp = newvp;
	}

	/*
	 * Do the deferred update of the parent directory's sequence
	 * number now.
	 */
	if (defer_dip_seq_update == 1) {
		rw_enter(&dip->i_contents, RW_READER);
		mutex_enter(&dip->i_tlock);
		dip->i_seq++;
		mutex_exit(&dip->i_tlock);
		rw_exit(&dip->i_contents);
	}

unlock:
	if (ulp) {
		int terr = 0;

		TRANS_END_CSYNC(ufsvfsp, &terr, issync, TOP_CREATE,
		    trans_size);

		/*
		 * If we haven't had a more interesting failure
		 * already, then anything that might've happened
		 * here should be reported.
		 */
		if (error == 0)
			error = terr;
	}

	if (!error && truncflag) {
		indeadlock = ufs_tryirwlock(ulp, &ip->i_rwlock, RW_WRITER);
		if (indeadlock) {
			if (ulp)
				ufs_lockfs_end(ulp);
			VN_RELE(ITOV(ip));
			goto again;
		}
		(void) TRANS_ITRUNC(ip, 0, 0, cr);
		rw_exit(&ip->i_rwlock);
	}

	if (ulp)
		ufs_lockfs_end(ulp);

	/*
	 * If no inodes available, try to free one up out of the
	 * pending delete queue.
	 */
	if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		goto again;
	}

out:
	return (error);
}
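
/*
 * Illustrative note (not from the original source): the ENOSPC handling
 * above is a one-shot retry.  "retry" starts at 1, so the first ENOSPC on a
 * logging filesystem drains the delete queue (reclaiming inodes and blocks
 * whose deletion is still pending in the log) and re-runs the operation
 * with retry = 0 and the IQUIET suppression cleared; a second ENOSPC is
 * then reported to the caller.  ufs_mkdir and ufs_symlink below follow the
 * same pattern.
 */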
extern int ufs_idle_max;

/*ARGSUSED*/
static int
ufs_remove(struct vnode *vp, char *nm, struct cred *cr, caller_context_t *ct,
    int flags)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct ulockfs *ulp;
	vnode_t *rmvp = NULL;	/* Vnode corresponding to name being removed */
	int indeadlock;
	int error;
	int issync;
	int trans_size;

	/*
	 * don't let the delete queue get too long
	 */
	if (ufsvfsp == NULL) {
		error = EIO;
		goto out;
	}
	if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
		ufs_delete_drain(vp->v_vfsp, 1, 1);

	error = ufs_eventlookup(vp, nm, cr, &rmvp);
	if (rmvp != NULL) {
		/* Only send the event if there were no errors */
		if (error == 0)
			vnevent_remove(rmvp, vp, nm, ct);
		VN_RELE(rmvp);
	}
	if (error)
		goto out;

retry_remove:
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_REMOVE_MASK);
	if (error)
		goto out;

	if (ulp)
		TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_REMOVE,
		    trans_size = (int)TOP_REMOVE_SIZE(VTOI(vp)));

	/*
	 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
	 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
	 * possible, retries the operation.
	 */
	indeadlock = ufs_tryirwlock_trans(ulp, &ip->i_rwlock, RW_WRITER,
	    TOP_REMOVE, ufsvfsp, &error,
	    issync, trans_size);
	if (indeadlock)
		goto retry_remove;

	error = ufs_dirremove(ip, nm, NULL, NULL, DR_REMOVE, cr);
	rw_exit(&ip->i_rwlock);

	if (ulp) {
		TRANS_END_CSYNC(ufsvfsp, &error, issync, TOP_REMOVE,
		    trans_size);
		ufs_lockfs_end(ulp);
	}

out:
	return (error);
}
/*
 * Link a file or a directory.  Only privileged processes are allowed to
 * make links to directories.
 */
/*ARGSUSED*/
static int
ufs_link(struct vnode *tdvp, struct vnode *svp, char *tnm, struct cred *cr,
    caller_context_t *ct, int flags)
{
	struct inode *sip;
	struct inode *tdp = VTOI(tdvp);
	struct ufsvfs *ufsvfsp = tdp->i_ufsvfs;
	struct ulockfs *ulp;
	struct vnode *realvp;
	int error;
	int issync;
	int trans_size;
	int isdev;
	int indeadlock;

retry_link:
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LINK_MASK);
	if (error)
		goto out;

	if (ulp)
		TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_LINK,
		    trans_size = (int)TOP_LINK_SIZE(VTOI(tdvp)));

	if (fop_realvp(svp, &realvp, ct) == 0)
		svp = realvp;

	/*
	 * Make sure link for extended attributes is valid
	 * We only support hard linking of attr in ATTRDIR to ATTRDIR
	 *
	 * Make certain we don't attempt to look at a device node as
	 * a ufs inode.
	 */
	isdev = IS_DEVVP(svp);
	if (((isdev == 0) && ((VTOI(svp)->i_cflags & IXATTR) == 0) &&
	    ((tdp->i_mode & IFMT) == IFATTRDIR)) ||
	    ((isdev == 0) && (VTOI(svp)->i_cflags & IXATTR) &&
	    ((tdp->i_mode & IFMT) == IFDIR))) {
		error = EINVAL;
		goto unlock;
	}

	sip = VTOI(svp);
	if ((svp->v_type == VDIR &&
	    secpolicy_fs_linkdir(cr, ufsvfsp->vfs_vfs) != 0) ||
	    (sip->i_uid != crgetuid(cr) && secpolicy_basic_link(cr) != 0)) {
		error = EPERM;
		goto unlock;
	}

	/*
	 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
	 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
	 * possible, retries the operation.
	 */
	indeadlock = ufs_tryirwlock_trans(ulp, &tdp->i_rwlock, RW_WRITER,
	    TOP_LINK, ufsvfsp, &error, issync,
	    trans_size);
	if (indeadlock)
		goto retry_link;
	error = ufs_direnter_lr(tdp, tnm, DE_LINK, NULL, sip, cr);
	rw_exit(&tdp->i_rwlock);

unlock:
	if (ulp) {
		TRANS_END_CSYNC(ufsvfsp, &error, issync, TOP_LINK, trans_size);
		ufs_lockfs_end(ulp);
	}

	if (!error) {
		vnevent_link(svp, ct);
	}
out:
	return (error);
}
uint64_t ufs_rename_retry_cnt;
uint64_t ufs_rename_upgrade_retry_cnt;
uint64_t ufs_rename_dircheck_retry_cnt;
clock_t	 ufs_rename_backoff_delay = 1;

/*
 * Rename a file or directory.
 * We are given the vnode and entry string of the source and the
 * vnode and entry string of the place we want to move the source
 * to (the target). The essential operation is:
 *	unlink(target);
 *	link(source, target);
 *	unlink(source);
 * but "atomically".  Can't do full commit without saving state in
 * the inode on disk, which isn't feasible at this time.  Best we
 * can do is always guarantee that the TARGET exists.
 */

/*ARGSUSED*/
static int
ufs_rename(struct vnode *sdvp, char *snm, struct vnode *tdvp, char *tnm,
    struct cred *cr, caller_context_t *ct, int flags)
{
	struct inode *sip = NULL;	/* source inode */
	struct inode *ip = NULL;	/* check inode */
	struct inode *sdp;		/* old (source) parent inode */
	struct inode *tdp;		/* new (target) parent inode */
	struct vnode *svp = NULL;	/* source vnode */
	struct vnode *tvp = NULL;	/* target vnode, if it exists */
	struct vnode *realvp;
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp = NULL;
	struct ufs_slot slot;
	timestruc_t now;
	int error;
	int issync;
	int trans_size;
	krwlock_t *first_lock;
	krwlock_t *second_lock;
	krwlock_t *reverse_lock;
	int serr, terr;
	ino_t inum;

	sdp = VTOI(sdvp);
	slot.fbp = NULL;
	ufsvfsp = sdp->i_ufsvfs;

	if (fop_realvp(tdvp, &realvp, ct) == 0)
		tdvp = realvp;

	/* Must do this before taking locks in case of DNLC miss */
	terr = ufs_eventlookup(tdvp, tnm, cr, &tvp);
	serr = ufs_eventlookup(sdvp, snm, cr, &svp);

	if ((serr == 0) && ((terr == 0) || (terr == ENOENT))) {
		if (tvp != NULL)
			vnevent_pre_rename_dest(tvp, tdvp, tnm, ct);

		/*
		 * Notify the target directory of the rename event
		 * if source and target directories are not the same.
		 */
		if (sdvp != tdvp)
			vnevent_pre_rename_dest_dir(tdvp, svp, tnm, ct);

		if (svp != NULL)
			vnevent_pre_rename_src(svp, sdvp, snm, ct);
	}
	if (svp != NULL)
		VN_RELE(svp);

retry_rename:
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RENAME_MASK);
	if (error)
		goto unlock;

	if (ulp)
		TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_RENAME,
		    trans_size = (int)TOP_RENAME_SIZE(sdp));

	if (fop_realvp(tdvp, &realvp, ct) == 0)
		tdvp = realvp;

	tdp = VTOI(tdvp);

	/*
	 * We only allow renaming of attributes from ATTRDIR to ATTRDIR.
	 */
	if ((tdp->i_mode & IFMT) != (sdp->i_mode & IFMT)) {
		error = EINVAL;
		goto unlock;
	}

	/*
	 * Check accessibility of directory.
	 */
	if (error = ufs_diraccess(sdp, IEXEC, cr))
		goto unlock;

	/*
	 * Look up inode of file we're supposed to rename.
	 */
	gethrestime(&now);
	if (error = ufs_dirlook(sdp, snm, &sip, cr, 0, 0)) {
		if (error == EAGAIN) {
			if (ulp) {
				TRANS_END_CSYNC(ufsvfsp, &error, issync,
				    TOP_RENAME, trans_size);
				ufs_lockfs_end(ulp);
			}
			goto retry_rename;
		}

		goto unlock;
	}

	/*
	 * Lock both the source and target directories (they may be
	 * the same) to provide the atomicity semantics that was
	 * previously provided by the per file system vfs_rename_lock
	 *
	 * with vfs_rename_lock removed to allow simultaneous renames
	 * within a file system, ufs_dircheckpath can deadlock while
	 * traversing back to ensure that source is not a parent directory
	 * of target parent directory. This is because we get into
	 * ufs_dircheckpath with the sdp and tdp locks held as RW_WRITER.
	 * If the tdp and sdp of the simultaneous renames happen to be
	 * in the path of each other, it can lead to a deadlock. This
	 * can be avoided by getting the locks as RW_READER here and then
	 * upgrading to RW_WRITER after completing the ufs_dircheckpath.
	 *
	 * We hold the target directory's i_rwlock after calling
	 * ufs_lockfs_begin but in many other operations (like ufs_readdir)
	 * fop_rwlock is explicitly called by the filesystem independent code
	 * before calling the file system operation. In these cases the order
	 * is reversed (i.e i_rwlock is taken first and then ufs_lockfs_begin
	 * is called). This is fine as long as ufs_lockfs_begin acts as a VOP
	 * counter but with ufs_quiesce setting the SLOCK bit this becomes a
	 * synchronizing object which might lead to a deadlock. So we use
	 * rw_tryenter instead of rw_enter. If we fail to get this lock and
	 * find that SLOCK bit is set, we call ufs_lockfs_end and restart the
	 * operation.
	 */
retry:
	first_lock = &tdp->i_rwlock;
	second_lock = &sdp->i_rwlock;
retry_firstlock:
	if (!rw_tryenter(first_lock, RW_READER)) {
		/*
		 * We didn't get the lock. Check if the SLOCK is set in the
		 * ufsvfs. If yes, we might be in a deadlock. Safer to give up
		 * and wait for SLOCK to be cleared.
		 */
		if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
			TRANS_END_CSYNC(ufsvfsp, &error, issync, TOP_RENAME,
			    trans_size);
			ufs_lockfs_end(ulp);
			goto retry_rename;
		} else {
			/*
			 * SLOCK isn't set so this is a genuine synchronization
			 * case. Let's try again after giving them a breather.
			 */
			delay(RETRY_LOCK_DELAY);
			goto retry_firstlock;
		}
	}
	/*
	 * Need to check if the tdp and sdp are same !!!
	 */
	if ((tdp != sdp) && (!rw_tryenter(second_lock, RW_READER))) {
		/*
		 * We didn't get the lock. Check if the SLOCK is set in the
		 * ufsvfs. If yes, we might be in a deadlock. Safer to give up
		 * and wait for SLOCK to be cleared.
		 */
		rw_exit(first_lock);
		if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
			TRANS_END_CSYNC(ufsvfsp, &error, issync, TOP_RENAME,
			    trans_size);
			ufs_lockfs_end(ulp);
			goto retry_rename;
		} else {
			/*
			 * So we couldn't get the second level peer lock *and*
			 * the SLOCK bit isn't set. Too bad we can be
			 * contending with someone wanting these locks otherway
			 * round. Reverse the locks in case there is a heavy
			 * contention for the second level lock.
			 */
			reverse_lock = first_lock;
			first_lock = second_lock;
			second_lock = reverse_lock;
			ufs_rename_retry_cnt++;
			goto retry_firstlock;
		}
	}

	/*
	 * Make sure we can delete the source entry.  This requires
	 * write permission on the containing directory.
	 * Check for sticky directories.
	 */
	rw_enter(&sdp->i_contents, RW_READER);
	rw_enter(&sip->i_contents, RW_READER);
	if ((error = ufs_iaccess(sdp, IWRITE, cr, 0)) != 0 ||
	    (error = ufs_sticky_remove_access(sdp, sip, cr)) != 0) {
		rw_exit(&sip->i_contents);
		rw_exit(&sdp->i_contents);
		goto errout;
	}

	/*
	 * If this is a rename of a directory and the parent is
	 * different (".." must be changed), then the source
	 * directory must not be in the directory hierarchy
	 * above the target, as this would orphan everything
	 * below the source directory. Also the user must have
	 * write permission in the source so as to be able to
	 * change "..".
	 */
	if ((((sip->i_mode & IFMT) == IFDIR) ||
	    ((sip->i_mode & IFMT) == IFATTRDIR)) && sdp != tdp) {
		if (error = ufs_iaccess(sip, IWRITE, cr, 0)) {
			rw_exit(&sip->i_contents);
			rw_exit(&sdp->i_contents);
			goto errout;
		}
		inum = sip->i_number;
		rw_exit(&sip->i_contents);
		rw_exit(&sdp->i_contents);
		if ((error = ufs_dircheckpath(inum, tdp, sdp, cr))) {
			/*
			 * If we got EAGAIN ufs_dircheckpath detected a
			 * potential deadlock and backed out. We need
			 * to retry the operation since sdp and tdp have
			 * to be released to avoid the deadlock.
			 */
			if (error == EAGAIN) {
				rw_exit(&tdp->i_rwlock);
				if (tdp != sdp)
					rw_exit(&sdp->i_rwlock);
				delay(ufs_rename_backoff_delay);
				ufs_rename_dircheck_retry_cnt++;
				goto retry;
			}
			goto errout;
		}
	} else {
		rw_exit(&sip->i_contents);
		rw_exit(&sdp->i_contents);
	}

	/*
	 * Check for renaming '.' or '..' or alias of '.'
	 */
	if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0 || sdp == sip) {
		error = EINVAL;
		goto errout;
	}

	/*
	 * Simultaneous renames can deadlock in ufs_dircheckpath since it
	 * tries to traverse back the file tree with both tdp and sdp held
	 * as RW_WRITER. To avoid that we have to hold the tdp and sdp locks
	 * as RW_READERS till ufs_dircheckpath is done.
	 * Now that ufs_dircheckpath is done with, we can upgrade the locks
	 * to RW_WRITER.
	 */
	if (!rw_tryupgrade(&tdp->i_rwlock)) {
		/*
		 * The upgrade failed. We got to give away the lock
		 * as to avoid deadlocking with someone else who is
		 * waiting for writer lock. With the lock gone, we
		 * cannot be sure the checks done above will hold
		 * good when we eventually get them back as writer.
		 * So if we can't upgrade we drop the locks and retry
		 * everything again.
		 */
		rw_exit(&tdp->i_rwlock);
		if (tdp != sdp)
			rw_exit(&sdp->i_rwlock);
		delay(ufs_rename_backoff_delay);
		ufs_rename_upgrade_retry_cnt++;
		goto retry;
	}
	if (tdp != sdp) {
		if (!rw_tryupgrade(&sdp->i_rwlock)) {
			/*
			 * The upgrade failed. We got to give away the lock
			 * as to avoid deadlocking with someone else who is
			 * waiting for writer lock. With the lock gone, we
			 * cannot be sure the checks done above will hold
			 * good when we eventually get them back as writer.
			 * So if we can't upgrade we drop the locks and retry
			 * everything again.
			 */
			rw_exit(&tdp->i_rwlock);
			rw_exit(&sdp->i_rwlock);
			delay(ufs_rename_backoff_delay);
			ufs_rename_upgrade_retry_cnt++;
			goto retry;
		}
	}

	/*
	 * Now that all the locks are held check to make sure another thread
	 * didn't slip in and take out the sip.
	 */
	if ((sip->i_ctime.tv_usec * 1000) > now.tv_nsec ||
	    sip->i_ctime.tv_sec > now.tv_sec) {
		rw_enter(&sdp->i_ufsvfs->vfs_dqrwlock, RW_READER);
		rw_enter(&sdp->i_contents, RW_WRITER);
		error = ufs_dircheckforname(sdp, snm, strlen(snm), &slot,
		    &ip, cr, 0);
		rw_exit(&sdp->i_contents);
		rw_exit(&sdp->i_ufsvfs->vfs_dqrwlock);
		if (error)
			goto errout;
		if (ip == NULL) {
			error = ENOENT;
			goto errout;
		}
		/*
		 * If the inode was found need to drop the v_count
		 * so as not to keep the filesystem from being
		 * unmounted at a later time.
		 */
		VN_RELE(ITOV(ip));

		/*
		 * Release the slot.fbp that has the page mapped and
		 * locked SE_SHARED, and could be used in in
		 * ufs_direnter_lr() which needs to get the SE_EXCL lock
		 * on said page.
		 */
		if (slot.fbp) {
			fbrelse(slot.fbp, S_OTHER);
			slot.fbp = NULL;
		}
	}

	/*
	 * Link source to the target.
	 */
	if (error = ufs_direnter_lr(tdp, tnm, DE_RENAME, sdp, sip, cr)) {
		/*
		 * ESAME isn't really an error; it indicates that the
		 * operation should not be done because the source and target
		 * are the same file, but that no error should be reported.
		 */
		if (error == ESAME)
			error = 0;
		goto errout;
	}

	if (error == 0 && tvp != NULL)
		vnevent_rename_dest(tvp, tdvp, tnm, ct);

	/*
	 * Unlink the source.
	 * Remove the source entry.  ufs_dirremove() checks that the entry
	 * still reflects sip, and returns an error if it doesn't.
	 * If the entry has changed just forget about it.  Release
	 * the source inode.
	 */
	if ((error = ufs_dirremove(sdp, snm, sip, NULL,
	    DR_RENAME, cr)) == ENOENT)
		error = 0;

	if (error == 0) {
		vnevent_rename_src(ITOV(sip), sdvp, snm, ct);
		/*
		 * Notify the target directory of the rename event
		 * if source and target directories are not the same.
		 */
		if (sdvp != tdvp)
			vnevent_rename_dest_dir(tdvp, ct);
	}

errout:
	if (slot.fbp)
		fbrelse(slot.fbp, S_OTHER);

	rw_exit(&tdp->i_rwlock);
	if (sdp != tdp)
		rw_exit(&sdp->i_rwlock);

	VN_RELE(ITOV(sip));

unlock:
	if (ulp) {
		TRANS_END_CSYNC(ufsvfsp, &error, issync, TOP_RENAME,
		    trans_size);
		ufs_lockfs_end(ulp);
	}

	if (tvp != NULL)
		VN_RELE(tvp);

	return (error);
}
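
/*
 * Illustrative sketch (not from the original source): the reader-then-
 * upgrade dance in ufs_rename above generalizes to any two-lock traversal
 * that must later become exclusive:
 *
 *	rw_enter(a, RW_READER);
 *	rw_enter(b, RW_READER);
 *	...checks that may traverse other directories...
 *	if (!rw_tryupgrade(a) or !rw_tryupgrade(b))
 *		drop both locks, delay(), and retry from the top;
 *
 * rw_tryupgrade() never blocks, so two renames whose directories nest
 * inside each other back off instead of deadlocking, and the retry
 * counters (ufs_rename_*_retry_cnt) make the backoff rate observable.
 */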
/*ARGSUSED*/
static int
ufs_mkdir(struct vnode *dvp, char *dirname, struct vattr *vap,
    struct vnode **vpp, struct cred *cr, caller_context_t *ct, int flags,
    vsecattr_t *vsecp)
{
	struct inode *ip;
	struct inode *xip;
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp;
	int error;
	int issync;
	int trans_size;
	int indeadlock;
	int retry = 1;

	ASSERT((vap->va_mask & (VATTR_TYPE|VATTR_MODE)) ==
	    (VATTR_TYPE|VATTR_MODE));

	/*
	 * Can't make directory in attr hidden dir
	 */
	if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)
		return (EINVAL);

again:
	ip = VTOI(dvp);
	ufsvfsp = ip->i_ufsvfs;
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_MKDIR_MASK);
	if (error)
		goto out;

	if (ulp)
		TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_MKDIR,
		    trans_size = (int)TOP_MKDIR_SIZE(ip));

	/*
	 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
	 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
	 * possible, retries the operation.
	 */
	indeadlock = ufs_tryirwlock_trans(ulp, &ip->i_rwlock, RW_WRITER,
	    TOP_MKDIR, ufsvfsp, &error, issync,
	    trans_size);
	if (indeadlock)
		goto again;

	error = ufs_direnter_cm(ip, dirname, DE_MKDIR, vap, &xip, cr,
	    (retry ? IQUIET : 0));
	if (error == EAGAIN) {
		if (ulp) {
			TRANS_END_CSYNC(ufsvfsp, &error, issync, TOP_MKDIR,
			    trans_size);
			ufs_lockfs_end(ulp);
		}
		goto again;
	}

	rw_exit(&ip->i_rwlock);
	if (error == 0) {
		ip = xip;
		*vpp = ITOV(ip);
	} else if (error == EEXIST)
		VN_RELE(ITOV(xip));

	if (ulp) {
		int terr = 0;
		TRANS_END_CSYNC(ufsvfsp, &terr, issync, TOP_MKDIR, trans_size);
		ufs_lockfs_end(ulp);
		if (error == 0)
			error = terr;
	}
out:
	if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		goto again;
	}

	return (error);
}
/*ARGSUSED*/
static int
ufs_rmdir(struct vnode *vp, char *nm, struct vnode *cdir, struct cred *cr,
    caller_context_t *ct, int flags)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct ulockfs *ulp;
	vnode_t *rmvp = NULL;	/* Vnode of removed directory */
	int error;
	int issync;
	int trans_size;
	int indeadlock;

	/*
	 * don't let the delete queue get too long
	 */
	if (ufsvfsp == NULL) {
		error = EIO;
		goto out;
	}
	if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
		ufs_delete_drain(vp->v_vfsp, 1, 1);

	error = ufs_eventlookup(vp, nm, cr, &rmvp);
	if (rmvp != NULL) {
		/* Only send the event if there were no errors */
		if (error == 0)
			vnevent_rmdir(rmvp, vp, nm, ct);
		VN_RELE(rmvp);
	}
	if (error)
		goto out;

retry_rmdir:
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RMDIR_MASK);
	if (error)
		goto out;

	if (ulp)
		TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_RMDIR,
		    trans_size = TOP_RMDIR_SIZE);

	/*
	 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
	 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
	 * possible, retries the operation.
	 */
	indeadlock = ufs_tryirwlock_trans(ulp, &ip->i_rwlock, RW_WRITER,
	    TOP_RMDIR, ufsvfsp, &error, issync,
	    trans_size);
	if (indeadlock)
		goto retry_rmdir;

	error = ufs_dirremove(ip, nm, NULL, cdir, DR_RMDIR, cr);

	rw_exit(&ip->i_rwlock);

	if (ulp) {
		TRANS_END_CSYNC(ufsvfsp, &error, issync, TOP_RMDIR,
		    trans_size);
		ufs_lockfs_end(ulp);
	}

out:
	return (error);
}
/* ARGSUSED */
static int
ufs_readdir(struct vnode *vp, struct uio *uiop, struct cred *cr, int *eofp,
    caller_context_t *ct, int flags)
{
	struct iovec *iovp;
	struct inode *ip;
	struct direct *idp;
	struct dirent64 *odp;
	struct fbuf *fbp;
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp;
	caddr_t outbuf;
	size_t bufsize;
	uint_t offset;
	uint_t bytes_wanted, total_bytes_wanted;
	int incount = 0;
	int outcount = 0;
	int error;

	ip = VTOI(vp);
	ASSERT(RW_READ_HELD(&ip->i_rwlock));

	if (uiop->uio_loffset >= MAXOFF32_T) {
		if (eofp)
			*eofp = 1;
		return (0);
	}

	/*
	 * Check if we have been called with a valid iov_len
	 * and bail out if not, otherwise we may potentially loop
	 * forever further down.
	 */
	if (uiop->uio_iov->iov_len <= 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Large Files: When we come here we are guaranteed that
	 * uio_offset can be used safely. The high word is zero.
	 */

	ufsvfsp = ip->i_ufsvfs;
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READDIR_MASK);
	if (error)
		goto out;

	iovp = uiop->uio_iov;
	total_bytes_wanted = iovp->iov_len;

	/* Large Files: directory files should not be "large" */

	ASSERT(ip->i_size <= MAXOFF32_T);

	/* Force offset to be valid (to guard against bogus lseek() values) */
	offset = (uint_t)uiop->uio_offset & ~(DIRBLKSIZ - 1);

	/* Quit if at end of file or link count of zero (posix) */
	if (offset >= (uint_t)ip->i_size || ip->i_nlink <= 0) {
		if (eofp)
			*eofp = 1;
		error = 0;
		goto unlock;
	}

	/*
	 * Get space to change directory entries into fs independent format.
	 * Do fast alloc for the most commonly used-request size (filesystem
	 * block size).
	 */
	if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1) {
		bufsize = total_bytes_wanted;
		outbuf = kmem_alloc(bufsize, KM_SLEEP);
		odp = (struct dirent64 *)outbuf;
	} else {
		bufsize = total_bytes_wanted;
		odp = (struct dirent64 *)iovp->iov_base;
	}

nextblk:
	bytes_wanted = total_bytes_wanted;

	/* Truncate request to file size */
	if (offset + bytes_wanted > (int)ip->i_size)
		bytes_wanted = (int)(ip->i_size - offset);

	/* Comply with MAXBSIZE boundary restrictions of fbread() */
	if ((offset & MAXBOFFSET) + bytes_wanted > MAXBSIZE)
		bytes_wanted = MAXBSIZE - (offset & MAXBOFFSET);

	/*
	 * Read in the next chunk.
	 * We are still holding the i_rwlock.
	 */
	error = fbread(vp, (offset_t)offset, bytes_wanted, S_OTHER, &fbp);
	if (error)
		goto update_inode;

	if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (ip->i_fs->fs_ronly == 0) &&
	    (!ufsvfsp->vfs_noatime)) {
		ip->i_flag |= IACC;
	}
	incount = 0;
	idp = (struct direct *)fbp->fb_addr;
	if (idp->d_ino == 0 && idp->d_reclen == 0 && idp->d_namlen == 0) {
		cmn_err(CE_WARN, "ufs_readdir: bad dir, inumber = %llu, "
		    "fs = %s\n",
		    (u_longlong_t)ip->i_number, ufsvfsp->vfs_fs->fs_fsmnt);
		fbrelse(fbp, S_OTHER);
		error = ENXIO;
		goto update_inode;
	}
	/* Transform to file-system independent format */
	while (incount < bytes_wanted) {
		/*
		 * If the current directory entry is mangled, then skip
		 * to the next block.  It would be nice to set the FSBAD
		 * flag in the super-block so that a fsck is forced on
		 * next reboot, but locking is a problem.
		 */
		if (idp->d_reclen & 0x3) {
			offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);
			break;
		}

		/* Skip to requested offset and skip empty entries */
		if (idp->d_ino != 0 && offset >= (uint_t)uiop->uio_offset) {
			ushort_t this_reclen =
			    DIRENT64_RECLEN(idp->d_namlen);
			/* Buffer too small for any entries */
			if (!outcount && this_reclen > bufsize) {
				fbrelse(fbp, S_OTHER);
				error = EINVAL;
				goto update_inode;
			}
			/* If would overrun the buffer, quit */
			if (outcount + this_reclen > bufsize) {
				break;
			}
			/* Take this entry */
			odp->d_ino = (ino64_t)idp->d_ino;
			odp->d_reclen = (ushort_t)this_reclen;
			odp->d_off = (offset_t)(offset + idp->d_reclen);

			/* use strncpy(9f) to zero out uninitialized bytes */

			ASSERT(strlen(idp->d_name) + 1 <=
			    DIRENT64_NAMELEN(this_reclen));
			(void) strncpy(odp->d_name, idp->d_name,
			    DIRENT64_NAMELEN(this_reclen));
			outcount += odp->d_reclen;
			odp = (struct dirent64 *)
			    ((intptr_t)odp + odp->d_reclen);
			ASSERT(outcount <= bufsize);
		}
		if (idp->d_reclen) {
			incount += idp->d_reclen;
			offset += idp->d_reclen;
			idp = (struct direct *)((intptr_t)idp + idp->d_reclen);
		} else {
			offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);
			break;
		}
	}
	/* Release the chunk */
	fbrelse(fbp, S_OTHER);

	/* Read whole block, but got no entries, read another if not eof */

	/*
	 * Large Files: casting i_size to int here is not a problem
	 * because directory sizes are always less than MAXOFF32_T.
	 * See assertion above.
	 */

	if (offset < (int)ip->i_size && !outcount)
		goto nextblk;

	/* Copy out the entry data */
	if (uiop->uio_segflg == UIO_SYSSPACE && uiop->uio_iovcnt == 1) {
		iovp->iov_base += outcount;
		iovp->iov_len -= outcount;
		uiop->uio_resid -= outcount;
		uiop->uio_offset = offset;
	} else if ((error = uiomove(outbuf, (long)outcount, UIO_READ,
	    uiop)) == 0)
		uiop->uio_offset = offset;
update_inode:
	ITIMES(ip);
	if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1)
		kmem_free(outbuf, bufsize);

	if (eofp && error == 0)
		*eofp = (uiop->uio_offset >= (int)ip->i_size);
unlock:
	if (ulp) {
		ufs_lockfs_end(ulp);
	}
out:
	return (error);
}
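
/*
 * Illustrative example (not from the original source): the on-disk
 * "struct direct" entries above are repacked as struct dirent64 records.
 * DIRENT64_RECLEN() rounds the name up so each record stays 8-byte
 * aligned; e.g. for a 5-byte name such as "hello":
 *
 *	this_reclen = DIRENT64_RECLEN(5)
 *	            = roundup(offsetof(dirent64_t, d_name) + 5 + 1, 8)
 *
 * The strncpy() out to DIRENT64_NAMELEN(this_reclen) then zero-fills the
 * alignment padding so no uninitialized kernel memory is copied out.
 */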
/*ARGSUSED*/
static int
ufs_symlink(struct vnode *dvp, char *linkname, struct vattr *vap, char *target,
    struct cred *cr, caller_context_t *ct, int flags)
{
	struct inode *ip, *dip = VTOI(dvp);
	struct ufsvfs *ufsvfsp = dip->i_ufsvfs;
	struct ulockfs *ulp;
	int ioflag;
	int error;
	int issync;
	int trans_size;
	int residual;
	int retry = 1;

	/*
	 * No symlinks in attrdirs at this time
	 */
	if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)
		return (EINVAL);

again:
	ip = (struct inode *)NULL;
	vap->va_type = VLNK;
	vap->va_rdev = 0;

	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SYMLINK_MASK);
	if (error)
		goto out;

	if (ulp)
		TRANS_BEGIN_CSYNC(ufsvfsp, &issync, TOP_SYMLINK,
		    trans_size = (int)TOP_SYMLINK_SIZE(dip));

	/*
	 * We must create the inode before the directory entry, to avoid
	 * racing with readlink().  ufs_dirmakeinode requires that we
	 * hold the quota lock as reader, and directory locks as writer.
	 */

	rw_enter(&dip->i_rwlock, RW_WRITER);
	rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
	rw_enter(&dip->i_contents, RW_WRITER);

	/*
	 * Suppress any out of inodes messages if we will retry on
	 * ENOSPC
	 */
	if (retry)
		dip->i_flag |= IQUIET;

	error = ufs_dirmakeinode(dip, &ip, vap, DE_SYMLINK, cr);

	dip->i_flag &= ~IQUIET;

	rw_exit(&dip->i_contents);
	rw_exit(&ufsvfsp->vfs_dqrwlock);
	rw_exit(&dip->i_rwlock);

	if (error)
		goto unlock;

	/*
	 * OK.  The inode has been created.  Write out the data of the
	 * symbolic link.  Since symbolic links are metadata, and should
	 * remain consistent across a system crash, we need to force the
	 * data out synchronously.
	 *
	 * (This is a change from the semantics in earlier releases, which
	 * only created symbolic links synchronously if the semi-documented
	 * 'syncdir' option was set, or if we were being invoked by the NFS
	 * server, which requires symbolic links to be created synchronously.)
	 *
	 * We need to pass in a pointer for the residual length; otherwise
	 * ufs_rdwri() will always return EIO if it can't write the data,
	 * even if the error was really ENOSPC or EDQUOT.
	 */

	ioflag = FWRITE | FDSYNC;
	residual = 0;

	rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
	rw_enter(&ip->i_contents, RW_WRITER);

	/*
	 * Suppress file system full messages if we will retry
	 */
	if (retry)
		ip->i_flag |= IQUIET;

	error = ufs_rdwri(UIO_WRITE, ioflag, ip, target, strlen(target),
	    0, UIO_SYSSPACE, &residual, cr);

	ip->i_flag &= ~IQUIET;

	if (error) {
		rw_exit(&ip->i_contents);
		rw_exit(&ufsvfsp->vfs_dqrwlock);
		goto remove;
	}

	/*
	 * If the link's data is small enough, we can cache it in the inode.
	 * This is a "fast symbolic link".  We don't use the first direct
	 * block because that's actually used to point at the symbolic link's
	 * contents on disk; but we know that none of the other direct or
	 * indirect blocks can be used because symbolic links are restricted
	 * to be smaller than a file system block.
	 */

	ASSERT(MAXPATHLEN <= VBSIZE(ITOV(ip)));

	if (ip->i_size > 0 && ip->i_size <= FSL_SIZE) {
		if (kcopy(target, &ip->i_db[1], ip->i_size) == 0) {
			ip->i_flag |= IFASTSYMLNK;
		} else {
			int i;
			/* error, clear garbage left behind */
			for (i = 1; i < NDADDR; i++)
				ip->i_db[i] = 0;
			for (i = 0; i < NIADDR; i++)
				ip->i_ib[i] = 0;
		}
	}

	rw_exit(&ip->i_contents);
	rw_exit(&ufsvfsp->vfs_dqrwlock);

	/*
	 * OK.  We've successfully created the symbolic link.  All that
	 * remains is to insert it into the appropriate directory.
	 */

	rw_enter(&dip->i_rwlock, RW_WRITER);
	error = ufs_direnter_lr(dip, linkname, DE_SYMLINK, NULL, ip, cr);
	rw_exit(&dip->i_rwlock);

	/*
	 * Fall through into remove-on-error code.  We're either done, or we
	 * need to remove the inode (if we couldn't insert it).
	 */

remove:
	if (error && (ip != NULL)) {
		rw_enter(&ip->i_contents, RW_WRITER);
		ip->i_nlink--;
		ip->i_flag |= ICHG;
		ip->i_seq++;
		ufs_setreclaim(ip);
		rw_exit(&ip->i_contents);
	}

unlock:
	if (ip != NULL)
		VN_RELE(ITOV(ip));

	if (ulp) {
		int terr = 0;

		TRANS_END_CSYNC(ufsvfsp, &terr, issync, TOP_SYMLINK,
		    trans_size);
		ufs_lockfs_end(ulp);
		if (error == 0)
			error = terr;
	}

	/*
	 * We may have failed due to lack of an inode or of a block to
	 * store the target in.  Try flushing the delete queue to free
	 * logically-available things up and try again.
	 */
	if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		goto again;
	}

out:
	return (error);
}
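
/*
 * Illustrative note (not from the original source): the link target is
 * written with FWRITE|FDSYNC *before* the fast-symlink kcopy() into
 * &ip->i_db[1], so the on-disk block form always holds the target text.
 * If the kcopy() fails, the error path only clears the scribbled i_db/i_ib
 * slots and the slow representation remains authoritative.
 */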
/*
 * Ufs specific routine used to do ufs io.
 */
int
ufs_rdwri(enum uio_rw rw, int ioflag, struct inode *ip, caddr_t base,
    ssize_t len, offset_t offset, enum uio_seg seg, int *aresid,
    struct cred *cr)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	bzero((caddr_t)&auio, sizeof (uio_t));
	bzero((caddr_t)&aiov, sizeof (iovec_t));

	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_loffset = offset;
	auio.uio_segflg = (short)seg;
	auio.uio_resid = len;

	if (rw == UIO_WRITE) {
		auio.uio_fmode = FWRITE;
		auio.uio_extflg = UIO_COPY_DEFAULT;
		auio.uio_llimit = curproc->p_fsz_ctl;
		error = wrip(ip, &auio, ioflag, cr);
	} else {
		auio.uio_fmode = FREAD;
		auio.uio_extflg = UIO_COPY_CACHED;
		auio.uio_llimit = MAXOFFSET_T;
		error = rdip(ip, &auio, ioflag, cr);
	}

	if (aresid) {
		*aresid = auio.uio_resid;
	} else if (auio.uio_resid) {
		error = EIO;
	}
	return (error);
}
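
/*
 * Illustrative usage (not from the original source): ufs_rdwri() packages a
 * kernel buffer as a single-iovec uio, so a caller holding i_contents can do
 * file I/O without building one by hand.  ufs_symlink above is the model:
 *
 *	int resid;
 *	error = ufs_rdwri(UIO_WRITE, FWRITE | FDSYNC, ip, target,
 *	    strlen(target), (offset_t)0, UIO_SYSSPACE, &resid, cr);
 *
 * Passing a non-NULL residual pointer lets the caller distinguish a short
 * write (ENOSPC/EDQUOT) from a hard EIO.
 */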
/* ARGSUSED */
static int
ufs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
{
	struct ufid *ufid;
	struct inode *ip = VTOI(vp);

	if (ip->i_ufsvfs == NULL)
		return (EIO);

	if (fidp->fid_len < (sizeof (struct ufid) - sizeof (ushort_t))) {
		fidp->fid_len = sizeof (struct ufid) - sizeof (ushort_t);
		return (ENOSPC);
	}

	ufid = (struct ufid *)fidp;
	bzero((char *)ufid, sizeof (struct ufid));
	ufid->ufid_len = sizeof (struct ufid) - sizeof (ushort_t);
	ufid->ufid_ino = ip->i_number;
	ufid->ufid_gen = ip->i_gen;

	return (0);
}
/* ARGSUSED2 */
static int
ufs_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
{
	struct inode	*ip = VTOI(vp);
	struct ufsvfs	*ufsvfsp;
	int		forcedirectio;

	/*
	 * Read case is easy.
	 */
	if (!write_lock) {
		rw_enter(&ip->i_rwlock, RW_READER);
		return (V_WRITELOCK_FALSE);
	}

	/*
	 * Caller has requested a writer lock, but that inhibits any
	 * concurrency in the VOPs that follow. Acquire the lock shared
	 * and defer exclusive access until it is known to be needed in
	 * other VOP handlers. Some cases can be determined here.
	 */

	/*
	 * If directio is not set, there is no chance of concurrency,
	 * so just acquire the lock exclusive. Beware of a forced
	 * unmount before looking at the mount option.
	 */
	ufsvfsp = ip->i_ufsvfs;
	forcedirectio = ufsvfsp ? ufsvfsp->vfs_forcedirectio : 0;
	if (!(ip->i_flag & IDIRECTIO || forcedirectio) ||
	    !ufs_allow_shared_writes) {
		rw_enter(&ip->i_rwlock, RW_WRITER);
		return (V_WRITELOCK_TRUE);
	}

	/*
	 * Mandatory locking forces acquiring i_rwlock exclusive.
	 */
	if (MANDLOCK(vp, ip->i_mode)) {
		rw_enter(&ip->i_rwlock, RW_WRITER);
		return (V_WRITELOCK_TRUE);
	}

	/*
	 * Acquire the lock shared in case a concurrent write follows.
	 * Mandatory locking could have become enabled before the lock
	 * was acquired. Re-check and upgrade if needed.
	 */
	rw_enter(&ip->i_rwlock, RW_READER);
	if (MANDLOCK(vp, ip->i_mode)) {
		rw_exit(&ip->i_rwlock);
		rw_enter(&ip->i_rwlock, RW_WRITER);
		return (V_WRITELOCK_TRUE);
	}
	return (V_WRITELOCK_FALSE);
}
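
/*
 * Illustrative note (not from the original source): ufs_rwlock encodes a
 * small decision table, assuming ufs_allow_shared_writes is left at its
 * default of enabled:
 *
 *	read request                       i_rwlock as READER
 *	write, no directio on file/mount   i_rwlock as WRITER
 *	write, mandatory locking           i_rwlock as WRITER
 *	write, directio in effect          i_rwlock as READER
 *
 * The V_WRITELOCK_TRUE/FALSE return value tells fs-independent callers
 * which mode was actually taken, so ufs_rwunlock below needs no state of
 * its own.
 */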
/*ARGSUSED*/
static void
ufs_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
{
	struct inode	*ip = VTOI(vp);

	rw_exit(&ip->i_rwlock);
}
/* ARGSUSED */
static int
ufs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
    caller_context_t *ct)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
/* ARGSUSED */
static int
ufs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, struct flk_callback *flk_cbp, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (ip->i_ufsvfs == NULL)
		return (EIO);

	/*
	 * If file is being mapped, disallow frlock.
	 * XXX I am not holding tlock while checking i_mapcnt because the
	 * current locking strategy drops all locks before calling fs_frlock.
	 * So, mapcnt could change before we enter fs_frlock making is
	 * meaningless to have held tlock in the first place.
	 */
	if (ip->i_mapcnt > 0 && MANDLOCK(vp, ip->i_mode))
		return (EAGAIN);
	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
}
/* ARGSUSED */
static int
ufs_space(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, cred_t *cr, caller_context_t *ct)
{
	struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
	struct ulockfs *ulp;
	int error;

	if ((error = convoff(vp, bfp, 0, offset)) == 0) {
		if (cmd == F_FREESP) {
			error = ufs_lockfs_begin(ufsvfsp, &ulp,
			    ULOCKFS_SPACE_MASK);
			if (error)
				return (error);
			error = ufs_freesp(vp, bfp, flag, cr);

			if (error == 0 && bfp->l_start == 0)
				vnevent_truncate(vp, ct);
		} else if (cmd == F_ALLOCSP) {
			error = ufs_lockfs_begin(ufsvfsp, &ulp,
			    ULOCKFS_FALLOCATE_MASK);
			if (error)
				return (error);
			error = ufs_allocsp(vp, bfp, cr);
		} else
			return (EINVAL); /* Command not handled here */

		if (ulp)
			ufs_lockfs_end(ulp);
	}
	return (error);
}
/*
 * Used to determine if read ahead should be done. Also used to
 * to determine when write back occurs.
 */
#define	CLUSTSZ(ip)		((ip)->i_ufsvfs->vfs_ioclustsz)

/*
 * A faster version of ufs_getpage.
 *
 * We optimize by inlining the pvn_getpages iterator, eliminating
 * calls to bmap_read if file doesn't have UFS holes, and avoiding
 * the overhead of page_exists().
 *
 * When files has UFS_HOLES and ufs_getpage is called with S_READ,
 * we set *protp to PROT_READ to avoid calling bmap_read. This approach
 * victimizes performance when a file with UFS holes is faulted
 * first in the S_READ mode, and then in the S_WRITE mode. We will get
 * two MMU faults in this case.
 *
 * XXX - the inode fields which control the sequential mode are not
 *	 protected by any mutex. The read ahead will act wild if
 *	 multiple processes will access the file concurrently and
 *	 some of them in sequential mode. One particulary bad case
 *	 is if another thread will change the value of i_nextrio between
 *	 the time this thread tests the i_nextrio value and then reads it
 *	 again to use it as the offset for the read ahead.
 */
/*ARGSUSED*/
static int
ufs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
    page_t *plarr[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, struct cred *cr, caller_context_t *ct)
{
	uoff_t		uoff = (uoff_t)off; /* type conversion */
	uoff_t		pgoff;
	uoff_t		eoff;
	struct inode	*ip = VTOI(vp);
	struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
	struct fs	*fs;
	struct ulockfs	*ulp;
	page_t		**pl;
	caddr_t		pgaddr;
	krw_t		rwtype;
	int		err;
	int		has_holes;
	int		beyond_eof;
	int		seqmode;
	int		pgsize = PAGESIZE;
	int		dolock;
	int		do_qlock;
	int		trans_size;

	ASSERT((uoff & PAGEOFFSET) == 0);

	if (protp)
		*protp = PROT_ALL;

	/*
	 * Obey the lockfs protocol
	 */
	err = ufs_lockfs_begin_getpage(ufsvfsp, &ulp, seg,
	    rw == S_READ || rw == S_EXEC, protp);
	if (err)
		goto out;

	fs = ufsvfsp->vfs_fs;

	if (ulp && (rw == S_CREATE || rw == S_WRITE) &&
	    !(vp->v_flag & VISSWAP)) {
		/*
		 * Try to start a transaction, will return if blocking is
		 * expected to occur and the address space is not the
		 * kernel address space.
		 */
		trans_size = TOP_GETPAGE_SIZE(ip);
		if (seg->s_as != &kas) {
			TRANS_TRY_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE,
			    trans_size, err);
			if (err == EWOULDBLOCK) {
				/*
				 * Use EDEADLK here because the VM code
				 * can normally never see this error.
				 */
				err = EDEADLK;
				ufs_lockfs_end(ulp);
				goto out;
			}
		} else {
			TRANS_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);
		}
	}

	if (vp->v_flag & VNOMAP) {
		err = ENOSYS;
		goto unlock;
	}

	seqmode = ip->i_nextr == uoff && rw != S_CREATE;

	rwtype = RW_READER;		/* start as a reader */
	dolock = (rw_owner(&ip->i_contents) != curthread);
	/*
	 * If this thread owns the lock, i.e., this thread grabbed it
	 * as writer somewhere above, then we don't need to grab the
	 * lock as reader in this routine.
	 */
	do_qlock = (rw_owner(&ufsvfsp->vfs_dqrwlock) != curthread);

retrylock:
	if (dolock) {
		/*
		 * Grab the quota lock if we need to call
		 * bmap_write() below (with i_contents as writer).
		 */
		if (do_qlock && rwtype == RW_WRITER)
			rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
		rw_enter(&ip->i_contents, rwtype);
	}

	/*
	 * We may be getting called as a side effect of a bmap using
	 * fbread() when the blocks might be being allocated and the
	 * size has not yet been up'ed. In this case we want to be
	 * able to return zero pages if we get back UFS_HOLE from
	 * calling bmap for a non write case here. We also might have
	 * to read some frags from the disk into a page if we are
	 * extending the number of frags for a given lbn in bmap().
	 * Large Files: The read of i_size here is atomic because
	 * i_contents is held here. If dolock is zero, the lock
	 * is held in bmap routines.
	 */
	beyond_eof = uoff + len >
	    P2ROUNDUP_TYPED(ip->i_size, PAGESIZE, uoff_t);
	if (beyond_eof && seg != segkmap) {
		if (dolock) {
			rw_exit(&ip->i_contents);
			if (do_qlock && rwtype == RW_WRITER)
				rw_exit(&ufsvfsp->vfs_dqrwlock);
		}
		err = EFAULT;
		goto unlock;
	}

	/*
	 * Must hold i_contents lock throughout the call to pvn_getpages
	 * since locked pages are returned from each call to ufs_getapage.
	 * Must *not* return locked pages and then try for contents lock
	 * due to lock ordering requirements (inode > page)
	 */

	has_holes = bmap_has_holes(ip);

	if ((rw == S_WRITE || rw == S_CREATE) && has_holes && !beyond_eof) {
		int	blk_size;
		uoff_t	offset;

		/*
		 * We must acquire the RW_WRITER lock in order to
		 * call bmap_write().
		 */
		if (dolock && rwtype == RW_READER) {
			rwtype = RW_WRITER;

			/*
			 * Grab the quota lock before
			 * upgrading i_contents, but if we can't grab it
			 * don't wait here due to lock order:
			 * vfs_dqrwlock > i_contents.
			 */
			if (do_qlock &&
			    rw_tryenter(&ufsvfsp->vfs_dqrwlock, RW_READER)
			    == 0) {
				rw_exit(&ip->i_contents);
				goto retrylock;
			}
			if (!rw_tryupgrade(&ip->i_contents)) {
				rw_exit(&ip->i_contents);
				if (do_qlock)
					rw_exit(&ufsvfsp->vfs_dqrwlock);
				goto retrylock;
			}
		}

		/*
		 * May be allocating disk blocks for holes here as
		 * a result of mmap faults. write(2) does the bmap_write
		 * in rdip/wrip, not here. We are not dealing with frags
		 * in this case.
		 *
		 * Large Files: We cast fs_bmask field to offset_t
		 * just as we do for MAXBMASK because uoff is a 64-bit
		 * data type. fs_bmask will still be a 32-bit type
		 * as we cannot change any ondisk data structures.
		 */

		offset = uoff & (offset_t)fs->fs_bmask;
		while (offset < uoff + len) {
			blk_size = (int)blksize(fs, ip, lblkno(fs, offset));
			err = bmap_write(ip, offset, blk_size,
			    BI_NORMAL, NULL, cr);
			if (ip->i_flag & (ICHG|IUPD))
				ip->i_seq++;
			if (err)
				goto update_inode;
			offset += blk_size; /* XXX - make this contig */
		}
	}

	/*
	 * Can be a reader from now on.
	 */
	if (dolock && rwtype == RW_WRITER) {
		rw_downgrade(&ip->i_contents);
		/*
		 * We can release vfs_dqrwlock early so do it, but make
		 * sure we don't try to release it again at the bottom.
		 */
		if (do_qlock) {
			rw_exit(&ufsvfsp->vfs_dqrwlock);
			do_qlock = 0;
		}
	}

	/*
	 * We remove PROT_WRITE in cases when the file has UFS holes
	 * because we don't want to call bmap_read() to check each
	 * page if it is backed with a disk block.
	 */
	if (protp && has_holes && rw != S_WRITE && rw != S_CREATE)
		*protp &= ~PROT_WRITE;

	err = 0;

	/*
	 * The loop looks up pages in the range [off, off + len).
	 * For each page, we first check if we should initiate an asynchronous
	 * read ahead before we call page_lookup (we may sleep in page_lookup
	 * for a previously initiated disk read).
	 */
	eoff = (uoff + len);
	for (pgoff = uoff, pgaddr = addr, pl = plarr;
	    pgoff < eoff; /* empty */) {
		page_t	*pp;
		uoff_t	nextrio;
		se_t	se;
		int	retval;

		se = ((rw == S_CREATE || rw == S_OTHER) ? SE_EXCL : SE_SHARED);

		/* Handle async getpage (faultahead) */
		if (plarr == NULL) {
			ip->i_nextrio = pgoff;
			(void) ufs_getpage_ra(vp, pgoff, seg, pgaddr);
			pgoff += pgsize;
			pgaddr += pgsize;
			continue;
		}
		/*
		 * Check if we should initiate read ahead of next cluster.
		 * We call page_exists only when we need to confirm that
		 * we have the current page before we initiate the read ahead.
		 */
		nextrio = ip->i_nextrio;
		if (seqmode &&
		    pgoff + CLUSTSZ(ip) >= nextrio && pgoff <= nextrio &&
		    nextrio < ip->i_size && page_exists(&vp->v_object, pgoff)) {
			retval = ufs_getpage_ra(vp, pgoff, seg, pgaddr);
			/*
			 * We always read ahead the next cluster of data
			 * starting from i_nextrio. If the page (vp,nextrio)
			 * is actually in core at this point, the routine
			 * ufs_getpage_ra() will stop pre-fetching data
			 * until we read that page in a synchronized manner
			 * through ufs_getpage_miss(). So, we should increase
			 * i_nextrio if the page (vp, nextrio) exists.
			 */
			if ((retval == 0) && page_exists(&vp->v_object, nextrio)) {
				ip->i_nextrio = nextrio + pgsize;
			}
		}

		if ((pp = page_lookup(&vp->v_object, pgoff, se)) != NULL) {
			/*
			 * We found the page in the page cache.
			 */
			*pl++ = pp;
			pgoff += pgsize;
			pgaddr += pgsize;
			len -= pgsize;
			plsz -= pgsize;
		} else {
			/*
			 * We have to create the page, or read it from disk.
			 */
			if (err = ufs_getpage_miss(vp, pgoff, len, seg, pgaddr,
			    pl, plsz, rw, seqmode))
				goto error;

			while (*pl != NULL) {
				pl++;
				pgoff += pgsize;
				pgaddr += pgsize;
				len -= pgsize;
				plsz -= pgsize;
			}
		}
	}

	/*
	 * Return pages up to plsz if they are in the page cache.
	 * We cannot return pages if there is a chance that they are
	 * backed with a UFS hole and rw is S_WRITE or S_CREATE.
	 */
	if (plarr && !(has_holes && (rw == S_WRITE || rw == S_CREATE))) {

		ASSERT((protp == NULL) ||
		    !(has_holes && (*protp & PROT_WRITE)));

		eoff = pgoff + plsz;
		while (pgoff < eoff) {
			page_t *pp;

			if ((pp = page_lookup_nowait(&vp->v_object, pgoff,
			    SE_SHARED)) == NULL)
				break;

			*pl++ = pp;
			pgoff += pgsize;
			plsz -= pgsize;
		}
	}

	if (plarr)
		*pl = NULL;			/* Terminate page list */
	ip->i_nextr = pgoff;

error:
	if (err && plarr) {
		/*
		 * Release any pages we have locked.
		 */
		while (pl > &plarr[0])
			page_unlock(*--pl);

		plarr[0] = NULL;
	}

update_inode:
	/*
	 * If the inode is not already marked for IACC (in rdip() for read)
	 * and the inode is not marked for no access time update (in wrip()
	 * for write) then update the inode access time and mod time now.
	 */
	if ((ip->i_flag & (IACC | INOACC)) == 0) {
		if ((rw != S_OTHER) && (ip->i_mode & IFMT) != IFDIR) {
			if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
			    (fs->fs_ronly == 0) &&
			    (!ufsvfsp->vfs_noatime)) {
				mutex_enter(&ip->i_tlock);
				ip->i_flag |= IACC;
				ITIMES_NOLOCK(ip);
				mutex_exit(&ip->i_tlock);
			}
		}
	}

	if (dolock) {
		rw_exit(&ip->i_contents);
		if (do_qlock && rwtype == RW_WRITER)
			rw_exit(&ufsvfsp->vfs_dqrwlock);
	}

unlock:
	if (ulp) {
		if ((rw == S_CREATE || rw == S_WRITE) &&
		    !(vp->v_flag & VISSWAP)) {
			TRANS_END_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);
		}
		ufs_lockfs_end(ulp);
	}
out:
	return (err);
}
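
/*
 * Illustrative sketch (not from the original source): i_nextrio marks the
 * start of the next read-ahead cluster.  The trigger test in ufs_getpage,
 *
 *	pgoff <= i_nextrio && i_nextrio < pgoff + CLUSTSZ(ip)
 *
 * fires once the faulting offset comes within one cluster of i_nextrio, so
 * on a sequential scan each synchronous cluster read schedules the next
 * asynchronous one, keeping roughly one cluster of I/O in flight ahead of
 * the consumer.
 */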

/*
 * ufs_getpage_miss is called when ufs_getpage missed the page in the page
 * cache. The page is either read from the disk, or it's created.
 * A page is created (without disk read) if rw == S_CREATE, or if
 * the page is not backed with a real disk block (UFS hole).
 */
static int
ufs_getpage_miss(struct vnode *vp, uoff_t off, size_t len, struct seg *seg,
    caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw, int seq)
{
	struct inode *ip = VTOI(vp);
	page_t *pp;
	daddr_t bn;
	size_t io_len;
	int crpage = 0;
	int err;
	int contig;
	int bsize = ip->i_fs->fs_bsize;

	/*
	 * Figure out whether the page can be created, or must be
	 * read from the disk.
	 */
	if (rw == S_CREATE)
		crpage = 1;
	else {
		contig = 0;
		if (err = bmap_read(ip, off, &bn, &contig))
			return (err);

		crpage = (bn == UFS_HOLE);

		/*
		 * If it's also a fallocated block that hasn't been written to
		 * yet, we will treat it just like a UFS_HOLE and create
		 * a zero page for it
		 */
		if (ISFALLOCBLK(ip, bn))
			crpage = 1;
	}

	if (crpage) {
		if ((pp = page_create_va(&vp->v_object, off, PAGESIZE, PG_WAIT,
		    seg, addr)) == NULL) {
			return (ufs_fault(vp,
			    "ufs_getpage_miss: page_create == NULL"));
		}

		if (rw != S_CREATE)
			pagezero(pp, 0, PAGESIZE);

		io_len = PAGESIZE;
	} else {
		uoff_t io_off;
		uint_t xlen;
		struct buf *bp;
		ufsvfs_t *ufsvfsp = ip->i_ufsvfs;

		/*
		 * If access is not in sequential order, we read from disk
		 * in bsize units.
		 *
		 * We limit the size of the transfer to bsize if we are reading
		 * from the beginning of the file. Note in this situation we
		 * will hedge our bets and initiate an async read ahead of
		 * the second block.
		 */
		if (!seq || off == 0)
			contig = MIN(contig, bsize);

		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
		    &io_len, off, contig, 0);

		/*
		 * Some other thread has entered the page.
		 * ufs_getpage will retry page_lookup.
		 */
		if (pp == NULL) {
			pl[0] = NULL;
			return (0);
		}

		/*
		 * Zero part of the page which we are not
		 * going to read from the disk.
		 */
		xlen = io_len & PAGEOFFSET;
		if (xlen != 0)
			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

		bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ);
		bp->b_edev = ip->i_dev;
		bp->b_dev = cmpdev(ip->i_dev);
		bp->b_blkno = bn;
		bp->b_un.b_addr = (caddr_t)0;
		bp->b_file = ip->i_vnode;
		bp->b_offset = off;

		if (ufsvfsp->vfs_log) {
			lufs_read_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_getpages.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_INBLK, 1);
		}

		ip->i_nextrio = off + ((io_len + PAGESIZE - 1) & PAGEMASK);

		/*
		 * If the file access is sequential, initiate read ahead
		 * of the next cluster.
		 */
		if (seq && ip->i_nextrio < ip->i_size)
			(void) ufs_getpage_ra(vp, off, seg, addr);

		err = biowait(bp);
		pageio_done(bp);

		if (err) {
			pvn_read_done(pp, B_ERROR);
			return (err);
		}
	}

	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
	return (0);
}
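
/*
 * Worked example of the read-ahead trigger arithmetic above (illustrative
 * only; assumes 4K pages, i.e. PAGESIZE == 0x1000, PAGEMASK == ~0xfff; the
 * offsets are hypothetical):
 *
 *	off    = 0x6000			(offset of the missed page)
 *	io_len = 0x1800			(6K scheduled by the kluster)
 *
 *	i_nextrio = off + ((io_len + PAGESIZE - 1) & PAGEMASK)
 *		  = 0x6000 + ((0x1800 + 0xfff) & ~0xfff)
 *		  = 0x6000 + 0x2000 = 0x8000
 *
 * i.e. the next read-ahead cluster starts at the first page boundary past
 * the bytes just scheduled, which is where ufs_getpage_ra() below resumes.
 */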

/*
 * Read ahead a cluster from the disk. Returns the length in bytes.
 */
static int
ufs_getpage_ra(struct vnode *vp, uoff_t off, struct seg *seg, caddr_t addr)
{
	struct inode *ip = VTOI(vp);
	page_t *pp;
	uoff_t io_off = ip->i_nextrio;
	ufsvfs_t *ufsvfsp;
	caddr_t addr2 = addr + (io_off - off);
	struct buf *bp;
	daddr_t bn;
	size_t io_len;
	int err;
	int contig;
	int xlen;
	int bsize = ip->i_fs->fs_bsize;

	/*
	 * If the directio advisory is in effect on this file,
	 * then do not do buffered read ahead. Read ahead makes
	 * it more difficult on threads using directio as they
	 * will be forced to flush the pages from this vnode.
	 */
	if ((ufsvfsp = ip->i_ufsvfs) == NULL)
		return (0);
	if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio)
		return (0);

	/*
	 * Is this test needed?
	 */
	if (addr2 >= seg->s_base + seg->s_size)
		return (0);

	contig = 0;
	err = bmap_read(ip, io_off, &bn, &contig);
	/*
	 * If it's a UFS_HOLE or a fallocated block, do not perform
	 * any read aheads since there probably is nothing to read ahead
	 */
	if (err || bn == UFS_HOLE || ISFALLOCBLK(ip, bn))
		return (0);

	/*
	 * Limit the transfer size to bsize if this is the 2nd block.
	 */
	if (io_off == (uoff_t)bsize)
		contig = MIN(contig, bsize);

	if ((pp = pvn_read_kluster(vp, io_off, seg, addr2, &io_off,
	    &io_len, io_off, contig, 1)) == NULL)
		return (0);

	/*
	 * Zero part of page which we are not going to read from disk
	 */
	if ((xlen = (io_len & PAGEOFFSET)) > 0)
		pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

	ip->i_nextrio = (io_off + io_len + PAGESIZE - 1) & PAGEMASK;

	bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ | B_ASYNC);
	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bn;
	bp->b_un.b_addr = (caddr_t)0;
	bp->b_file = ip->i_vnode;
	bp->b_offset = off;

	if (ufsvfsp->vfs_log) {
		lufs_read_strategy(ufsvfsp->vfs_log, bp);
	} else if (ufsvfsp->vfs_snapshot) {
		fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
	} else {
		ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
		ub.ub_getras.value.ul++;
		(void) bdev_strategy(bp);
		lwp_stat_update(LWP_STAT_INBLK, 1);
	}

	return (io_len);
}
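
/*
 * Illustrative sketch of the trailing-page zeroing above (assumes 4K
 * pages; the values are hypothetical): if bmap_read() reports only 0x1200
 * contiguous bytes, the kluster still spans two whole pages, so
 *
 *	xlen = io_len & PAGEOFFSET = 0x1200 & 0xfff = 0x200
 *	pagezero(pp->p_prev, 0x200, PAGESIZE - 0x200)
 *
 * zeroes the last 0xe00 bytes of the final page - the portion that no disk
 * block backs and that the B_READ transfer will not overwrite.
 */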

/*
 * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE, B_ASYNC}
 *
 * LMXXX - the inode really ought to contain a pointer to one of these
 * async args.  Stuff gunk in there and just hand the whole mess off.
 * This would replace i_delaylen, i_delayoff.
 */
static int
ufs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
    struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	int err = 0;

	if (vp->v_count == 0) {
		return (ufs_fault(vp, "ufs_putpage: bad v_count == 0"));
	}

	/*
	 * XXX - Why should this check be made here?
	 */
	if (vp->v_flag & VNOMAP) {
		err = ENOSYS;
		goto errout;
	}

	if (ip->i_ufsvfs == NULL) {
		err = EIO;
		goto errout;
	}

	if (flags & B_ASYNC) {
		if (ufs_delay && len &&
		    (flags & ~(B_ASYNC|B_DONTNEED|B_FREE)) == 0) {
			mutex_enter(&ip->i_tlock);
			/*
			 * If nobody stalled, start a new cluster.
			 */
			if (ip->i_delaylen == 0) {
				ip->i_delayoff = off;
				ip->i_delaylen = len;
				mutex_exit(&ip->i_tlock);
				goto errout;
			}
			/*
			 * If we have a full cluster or they are not contig,
			 * then push last cluster and start over.
			 */
			if (ip->i_delaylen >= CLUSTSZ(ip) ||
			    ip->i_delayoff + ip->i_delaylen != off) {
				uoff_t doff;
				size_t dlen;

				doff = ip->i_delayoff;
				dlen = ip->i_delaylen;
				ip->i_delayoff = off;
				ip->i_delaylen = len;
				mutex_exit(&ip->i_tlock);
				err = ufs_putpages(vp, doff, dlen,
				    flags, cr);
				/* LMXXX - flags are new val, not old */
				goto errout;
			}
			/*
			 * There is something there, it's not full, and
			 * it is contig.
			 */
			ip->i_delaylen += len;
			mutex_exit(&ip->i_tlock);
			goto errout;
		}
		/*
		 * Must have weird flags or we are not clustering.
		 */
	}

	err = ufs_putpages(vp, off, len, flags, cr);

errout:
	return (err);
}
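
/*
 * A sketch of how the i_delayoff/i_delaylen clustering above coalesces
 * B_ASYNC putpages (hypothetical offsets, 8K writes):
 *
 *	putpage(off=0x0000, len=0x2000)	starts a cluster   [0x0000, 0x2000)
 *	putpage(off=0x2000, len=0x2000)	contiguous, grows  [0x0000, 0x4000)
 *	putpage(off=0xa000, len=0x2000)	not contiguous: the existing cluster
 *					is pushed via ufs_putpages() and a
 *					new one starts at 0xa000.
 *
 * A cluster is likewise pushed once i_delaylen reaches CLUSTSZ(ip).
 */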

/*
 * If len == 0, do from off to EOF.
 *
 * The normal cases should be len == 0 & off == 0 (entire vp list),
 * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
 * (from pageout).
 */
static int
ufs_putpages(struct vnode *vp, offset_t off, size_t len, int flags,
    struct cred *cr)
{
	uoff_t io_off;
	uoff_t eoff;
	struct inode *ip = VTOI(vp);
	page_t *pp;
	size_t io_len;
	int err = 0;
	int dolock;

	if (vp->v_count == 0)
		return (ufs_fault(vp, "ufs_putpages: v_count == 0"));
	/*
	 * Acquire the reader/writer inode lock before locking
	 * any pages in this inode.
	 * The inode lock is held during i/o.
	 */
	if (len == 0) {
		mutex_enter(&ip->i_tlock);
		ip->i_delayoff = ip->i_delaylen = 0;
		mutex_exit(&ip->i_tlock);
	}
	dolock = (rw_owner(&ip->i_contents) != curthread);
	if (dolock) {
		/*
		 * Must synchronize this thread and any possible thread
		 * operating in the window of vulnerability in wrip().
		 * It is dangerous to allow both a thread doing a putpage
		 * and a thread writing, so serialize them.  The exception
		 * is when the thread in wrip() does something which causes
		 * a putpage operation.  Then, the thread must be allowed
		 * to continue.  It may encounter a bmap_read problem in
		 * ufs_putapage, but that is handled in ufs_putapage.
		 * Allow async writers to proceed, we don't want to block
		 * the pageout daemon.
		 */
		if (ip->i_writer == curthread)
			rw_enter(&ip->i_contents, RW_READER);
		else {
			for (;;) {
				rw_enter(&ip->i_contents, RW_READER);
				mutex_enter(&ip->i_tlock);
				/*
				 * If there is no thread in the critical
				 * section of wrip(), then proceed.
				 * Otherwise, wait until there isn't one.
				 */
				if (ip->i_writer == NULL) {
					mutex_exit(&ip->i_tlock);
					break;
				}
				rw_exit(&ip->i_contents);
				/*
				 * Bounce async writers when we have a writer
				 * working on this file so we don't deadlock
				 * the pageout daemon.
				 */
				if (flags & B_ASYNC) {
					mutex_exit(&ip->i_tlock);
					return (0);
				}
				cv_wait(&ip->i_wrcv, &ip->i_tlock);
				mutex_exit(&ip->i_tlock);
			}
		}
	}

	if (!vn_has_cached_data(vp)) {
		if (dolock)
			rw_exit(&ip->i_contents);
		return (0);
	}

	if (len == 0) {
		/*
		 * Search the entire vp list for pages >= off.
		 */
		err = pvn_vplist_dirty(vp, (uoff_t)off, ufs_putapage,
		    flags, cr);
	} else {
		/*
		 * Loop over all offsets in the range looking for
		 * pages to deal with.
		 */
		if ((eoff = blkroundup(ip->i_fs, ip->i_size)) != 0)
			eoff = MIN(off + len, eoff);
		else
			eoff = off + len;

		for (io_off = off; io_off < eoff; io_off += io_len) {
			/*
			 * If we are not invalidating, synchronously
			 * freeing or writing pages, use the routine
			 * page_lookup_nowait() to prevent reclaiming
			 * them from the free list.
			 */
			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
				pp = page_lookup(&vp->v_object, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(&vp->v_object,
				    io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL || pvn_getdirty(pp, flags) == 0)
				io_len = PAGESIZE;
			else {
				uoff_t *io_offp = &io_off;

				err = ufs_putapage(vp, pp, io_offp, &io_len,
				    flags, cr);
				if (err != 0)
					break;
				/*
				 * "io_off" and "io_len" are returned as
				 * the range of pages we actually wrote.
				 * This allows us to skip ahead more quickly
				 * since several pages may've been dealt
				 * with by this iteration of the loop.
				 */
			}
		}
	}
	if (err == 0 && off == 0 && (len == 0 || len >= ip->i_size)) {
		/*
		 * We have just sync'ed back all the pages on
		 * the inode, turn off the IMODTIME flag.
		 */
		mutex_enter(&ip->i_tlock);
		ip->i_flag &= ~IMODTIME;
		mutex_exit(&ip->i_tlock);
	}
	if (dolock)
		rw_exit(&ip->i_contents);
	return (err);
}

static void
ufs_iodone(buf_t *bp)
{
	struct inode *ip;

	VERIFY(bp->b_pages->p_object != NULL);
	ASSERT(bp->b_pages->p_vnode != NULL);
	ASSERT(!(bp->b_flags & B_READ));

	bp->b_iodone = NULL;

	ip = VTOI(bp->b_pages->p_vnode);

	mutex_enter(&ip->i_tlock);
	if (ip->i_writes >= ufs_LW) {
		if ((ip->i_writes -= bp->b_bcount) <= ufs_LW)
			if (ufs_WRITES)
				cv_broadcast(&ip->i_wrcv); /* wake all up */
	} else {
		ip->i_writes -= bp->b_bcount;
	}

	mutex_exit(&ip->i_tlock);
	iodone(bp);
}
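
/*
 * Note on the write throttle serviced above: ufs_putapage() adds b_bcount
 * to i_writes before issuing each write, and writers sleep on i_wrcv once
 * i_writes crosses the high-water tunable (ufs_HW in stock UFS - named
 * here as an assumption, it does not appear in this section). The
 * broadcast fires only when a completion drops i_writes back to the
 * low-water mark ufs_LW, and only if ufs_WRITES globally enables the
 * throttle: classic high/low-water hysteresis, waking sleepers after
 * enough i/o has drained rather than on every completion.
 */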

/*
 * Write out a single page, possibly klustering adjacent
 * dirty pages.  The inode lock must be held.
 *
 * LMXXX - bsize < pagesize not done.
 */
int
ufs_putapage(struct vnode *vp, page_t *pp, uoff_t *offp, size_t *lenp,
    int flags, struct cred *cr)
{
	uoff_t io_off;
	uoff_t off;
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct fs *fs;
	struct buf *bp;
	size_t io_len;
	daddr_t bn;
	int err;
	int contig;
	int dotrans;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	if (ufsvfsp == NULL) {
		err = EIO;
		goto out_trace;
	}

	fs = ip->i_fs;
	ASSERT(fs->fs_ronly == 0);

	/*
	 * If the modified time on the inode has not already been
	 * set elsewhere (e.g. for write/setattr) we set the time now.
	 * This gives us approximate modified times for mmap'ed files
	 * which are modified via stores in the user address space.
	 */
	if ((ip->i_flag & IMODTIME) == 0) {
		mutex_enter(&ip->i_tlock);
		ip->i_flag |= IUPD;
		ip->i_seq++;
		ITIMES_NOLOCK(ip);
		mutex_exit(&ip->i_tlock);
	}

	/*
	 * Align the request to a block boundary (for old file systems),
	 * and go ask bmap() how contiguous things are for this file.
	 */
	off = pp->p_offset & (offset_t)fs->fs_bmask;	/* block align it */
	contig = 0;
	err = bmap_read(ip, off, &bn, &contig);
	if (err)
		goto out;
	if (bn == UFS_HOLE) {			/* putpage never allocates */
		/*
		 * logging device is in error mode; simply return EIO
		 */
		if (TRANS_ISERROR(ufsvfsp)) {
			err = EIO;
			goto out;
		}
		/*
		 * Oops, the thread in the window in wrip() did some
		 * sort of operation which caused a putpage in the bad
		 * range.  In this case, just return an error which will
		 * cause the software modified bit on the page to set
		 * and the page will get written out again later.
		 */
		if (ip->i_writer == curthread) {
			err = EIO;
			goto out;
		}
		/*
		 * If the pager is trying to push a page in the bad range
		 * just tell it to try again later when things are better.
		 */
		if (flags & B_ASYNC) {
			err = EAGAIN;
			goto out;
		}
		err = ufs_fault(ITOV(ip), "ufs_putapage: bn == UFS_HOLE");
		goto out;
	}

	/*
	 * If it is a fallocate'd block, reverse the negativity since
	 * we are now writing to it
	 */
	if (ISFALLOCBLK(ip, bn)) {
		err = bmap_set_bn(vp, off, dbtofsb(fs, -bn));
		if (err)
			goto out;

		bn = -bn;
	}

	/*
	 * Take the length (of contiguous bytes) passed back from bmap()
	 * and _try_ and get a set of pages covering that extent.
	 */
	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, off, contig, flags);

	/*
	 * May have run out of memory and not clustered backwards.
	 * We told bmap off, so we have to adjust the bn accordingly.
	 */
	if (io_off > off) {
		bn += btod(io_off - off);
		contig -= (io_off - off);
	}

	/*
	 * bmap was careful to tell us the right size so use that.
	 * There might be unallocated frags at the end.
	 * LMXXX - bzero the end of the page?  We must be writing after EOF.
	 */
	if (io_len > contig) {
		ASSERT(io_len - contig < fs->fs_bsize);
		io_len -= (io_len - contig);
	}

	/*
	 * Handle the case where we are writing the last page after EOF.
	 *
	 * XXX - just a patch for i-mt3.
	 */
	if (io_len == 0) {
		ASSERT(pp->p_offset >=
		    (uoff_t)(roundup(ip->i_size, PAGESIZE)));
		io_len = PAGESIZE;
	}

	bp = pageio_setup(pp, io_len, ip->i_devvp, B_WRITE | flags);

	ULOCKFS_SET_MOD(ITOUL(ip));

	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bn;
	bp->b_un.b_addr = (caddr_t)0;
	bp->b_file = ip->i_vnode;

	/*
	 * File contents of shadow or quota inodes are metadata, and updates
	 * to these need to be put into a logging transaction. All direct
	 * callers in UFS do that, but fsflush can come here _before_ the
	 * normal codepath. An example would be updating ACL information, for
	 * which the normal codepath would be:
	 *	ufs_si_store()
	 *	ufs_rdwri()
	 *	wrip()
	 *	segmap_release()
	 *	fop_putpage()
	 * Here, fsflush can pick up the dirty page before segmap_release()
	 * forces it out. If that happens, there's no transaction.
	 * We therefore need to test whether a transaction exists, and if not
	 * create one - for fsflush.
	 */
	dotrans =
	    (((ip->i_mode & IFMT) == IFSHAD || ufsvfsp->vfs_qinod == ip) &&
	    ((curthread->t_flag & T_DONTBLOCK) == 0) &&
	    (TRANS_ISTRANS(ufsvfsp)));

	if (dotrans) {
		curthread->t_flag |= T_DONTBLOCK;
		TRANS_BEGIN_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
	}
	if (TRANS_ISTRANS(ufsvfsp)) {
		if ((ip->i_mode & IFMT) == IFSHAD) {
			TRANS_BUF(ufsvfsp, 0, io_len, bp, DT_SHAD);
		} else if (ufsvfsp->vfs_qinod == ip) {
			TRANS_DELTA(ufsvfsp, ldbtob(bn), bp->b_bcount, DT_QR,
			    0, 0);
		}
	}
	if (dotrans) {
		TRANS_END_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
		curthread->t_flag &= ~T_DONTBLOCK;
	}

	/* write throttle */

	ASSERT(bp->b_iodone == NULL);
	bp->b_iodone = (int (*)())ufs_iodone;
	mutex_enter(&ip->i_tlock);
	ip->i_writes += bp->b_bcount;
	mutex_exit(&ip->i_tlock);

	if (bp->b_flags & B_ASYNC) {
		if (ufsvfsp->vfs_log) {
			lufs_write_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_putasyncs.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		}
	} else {
		if (ufsvfsp->vfs_log) {
			lufs_write_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_putsyncs.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		}
		err = biowait(bp);
		pageio_done(bp);
		pvn_write_done(pp, ((err) ? B_ERROR : 0) | B_WRITE | flags);
		pp = NULL;
	}

	if (offp)
		*offp = io_off;
	if (lenp)
		*lenp = io_len;

out:
	if (err != 0 && pp != NULL)
		pvn_write_done(pp, B_ERROR | B_WRITE | flags);

out_trace:
	return (err);
}
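
/*
 * Worked example of the bn/contig adjustment after pvn_write_kluster()
 * above (hypothetical values; assumes DEV_BSIZE == 512). bmap was asked
 * about the block-aligned offset off, but under memory pressure the
 * kluster may fail to extend backwards, so io_off can come back greater
 * than off:
 *
 *	off = 0x10000, io_off = 0x11000	(kluster starts one 4K page in)
 *	bn += btod(0x11000 - 0x10000)	i.e. bn += 8 sectors
 *	contig -= 0x1000		one page less to write
 *
 * keeping the device block number in step with the first page that is
 * actually being written.
 */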

uint64_t ufs_map_alock_retry_cnt;
uint64_t ufs_map_lockfs_retry_cnt;

static int
ufs_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
    caller_context_t *ct)
{
	struct segvn_crargs vn_a;
	struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
	struct ulockfs *ulp;
	int error, sig;
	k_sigset_t smask;
	caddr_t hint = *addrp;

	if (vp->v_flag & VNOMAP) {
		error = ENOSYS;
		goto out;
	}

	if (off < 0 || (offset_t)(off + len) < 0) {
		error = ENXIO;
		goto out;
	}

	if (vp->v_type != VREG) {
		error = ENODEV;
		goto out;
	}

	/*
	 * If file is being locked, disallow mapping.
	 */
	if (vn_has_mandatory_locks(vp, VTOI(vp)->i_mode)) {
		error = EAGAIN;
		goto out;
	}

retry_map:
	*addrp = hint;
	as_rangelock(as);
	/*
	 * Note that if we are retrying (because ufs_lockfs_trybegin failed in
	 * the previous attempt), some other thread could have grabbed
	 * the same VA range if MAP_FIXED is set. In that case, choose_addr
	 * would unmap the valid VA range, that is ok.
	 */
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto out;
	}

	/*
	 * a_lock has to be acquired before entering the lockfs protocol
	 * because that is the order in which pagefault works. Also we cannot
	 * block on a_lock here because this waiting writer will prevent
	 * further readers like ufs_read from progressing and could cause
	 * deadlock between ufs_read/ufs_map/pagefault when a quiesce is
	 * pending.
	 */
	while (!AS_LOCK_TRYENTER(as, RW_WRITER)) {
		ufs_map_alock_retry_cnt++;
		delay(RETRY_LOCK_DELAY);
	}

	/*
	 * We can't hold as->a_lock and wait for lockfs to succeed because
	 * the proc tools might hang on a_lock, so call ufs_lockfs_trybegin()
	 * instead.
	 */
	if (error = ufs_lockfs_trybegin(ufsvfsp, &ulp, ULOCKFS_MAP_MASK)) {
		/*
		 * ufs_lockfs_trybegin() did not succeed. It is safer to give
		 * up as->a_lock and wait for ulp->ul_fs_lock status to change.
		 */
		ufs_map_lockfs_retry_cnt++;
		AS_LOCK_EXIT(as);
		as_rangeunlock(as);
		if (error == EIO)
			goto out;

		mutex_enter(&ulp->ul_lock);
		while (ulp->ul_fs_lock & ULOCKFS_MAP_MASK) {
			if (ULOCKFS_IS_SLOCK(ulp) || ufsvfsp->vfs_nointr) {
				cv_wait(&ulp->ul_cv, &ulp->ul_lock);
			} else {
				sigintr(&smask, 1);
				sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
				sigunintr(&smask);
				if (((ulp->ul_fs_lock & ULOCKFS_MAP_MASK) &&
				    !sig) || ufsvfsp->vfs_dontblock) {
					mutex_exit(&ulp->ul_lock);
					return (EINTR);
				}
			}
		}
		mutex_exit(&ulp->ul_lock);
		goto retry_map;
	}

	vn_a.vp = vp;
	vn_a.offset = (uoff_t)off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map_locked(as, *addrp, len, segvn_create, &vn_a);
	if (ulp)
		ufs_lockfs_end(ulp);
	as_rangeunlock(as);
out:
	return (error);
}
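
/*
 * The retry protocol above, in outline: a_lock must be taken before the
 * lockfs handshake (the order pagefault uses), but blocking on either
 * while holding the other risks deadlock with a pending quiesce. So
 * a_lock is taken with a spinning try-enter (delaying RETRY_LOCK_DELAY
 * between attempts) and lockfs with ufs_lockfs_trybegin(); when the latter
 * fails, everything is dropped, the thread waits for ul_fs_lock to clear,
 * and control loops back to retry_map. The ufs_map_alock_retry_cnt and
 * ufs_map_lockfs_retry_cnt counters make the retry rates observable from
 * a debugger (an observability aside, not something this file depends on).
 */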

static int
ufs_addmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
    struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (vp->v_flag & VNOMAP) {
		return (ENOSYS);
	}

	mutex_enter(&ip->i_tlock);
	ip->i_mapcnt += btopr(len);
	mutex_exit(&ip->i_tlock);
	return (0);
}

static int
ufs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (vp->v_flag & VNOMAP) {
		return (ENOSYS);
	}

	mutex_enter(&ip->i_tlock);
	ip->i_mapcnt -= btopr(len);	/* Count released mappings */
	ASSERT(ip->i_mapcnt >= 0);
	mutex_exit(&ip->i_tlock);
	return (0);
}

/*
 * Return the answer requested to poll() for non-device files
 */
struct pollhead ufs_pollhd;

static int
ufs_poll(vnode_t *vp, short ev, int any, short *revp, struct pollhead **phpp,
    caller_context_t *ct)
{
	struct ufsvfs *ufsvfsp;

	/*
	 * Regular files reject edge-triggered pollers.
	 * See the comment in fs_poll() for a more detailed explanation.
	 */
	if (ev & POLLET) {
		return (EPERM);
	}

	*revp = 0;
	ufsvfsp = VTOI(vp)->i_ufsvfs;

	if (!ufsvfsp) {
		*revp = POLLHUP;
		goto out;
	}

	if (ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs) ||
	    ULOCKFS_IS_ELOCK(&ufsvfsp->vfs_ulockfs)) {
		*revp |= POLLERR;
	} else {
		if ((ev & POLLOUT) && !ufsvfsp->vfs_fs->fs_ronly &&
		    !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
			*revp |= POLLOUT;

		if ((ev & POLLWRBAND) && !ufsvfsp->vfs_fs->fs_ronly &&
		    !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
			*revp |= POLLWRBAND;

		if (ev & POLLIN)
			*revp |= POLLIN;

		if (ev & POLLRDNORM)
			*revp |= POLLRDNORM;

		if (ev & POLLRDBAND)
			*revp |= POLLRDBAND;
	}

	if ((ev & POLLPRI) && (*revp & (POLLERR|POLLHUP)))
		*revp |= POLLPRI;
out:
	if (*revp == 0 && ! any) {
		*phpp = &ufs_pollhd;
	}

	return (0);
}

static int
ufs_l_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
    caller_context_t *ct)
{
	struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
	struct ulockfs *ulp = NULL;
	struct inode *sip = NULL;
	int error;
	struct inode *ip = VTOI(vp);
	int issync;

	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_PATHCONF_MASK);
	if (error)
		return (error);

	switch (cmd) {
		/*
		 * Have to handle _PC_NAME_MAX here, because the normal way
		 * [fs_pathconf() -> VOP_STATVFS() -> ufs_statvfs()]
		 * results in a lock ordering reversal between
		 * ufs_lockfs_{begin,end}() and
		 * ufs_thread_{suspend,continue}().
		 *
		 * Keep in sync with ufs_statvfs().
		 */
	case _PC_NAME_MAX:
		*valp = MAXNAMLEN;
		break;

	case _PC_FILESIZEBITS:
		if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
			*valp = UFS_FILESIZE_BITS;
		else
			*valp = 32;
		break;

	case _PC_XATTR_EXISTS:
		if (vp->v_vfsp->vfs_flag & VFS_XATTR) {

			error =
			    ufs_xattr_getattrdir(vp, &sip, LOOKUP_XATTR, cr);
			if (error == 0 && sip != NULL) {
				/* Start transaction */
				if (ulp) {
					TRANS_BEGIN_CSYNC(ufsvfsp, &issync,
					    TOP_RMDIR, TOP_RMDIR_SIZE);
				}
				/*
				 * Is directory empty
				 */
				rw_enter(&sip->i_rwlock, RW_WRITER);
				rw_enter(&sip->i_contents, RW_WRITER);
				if (ufs_xattrdirempty(sip,
				    sip->i_number, CRED())) {
					rw_enter(&ip->i_contents, RW_WRITER);
					ufs_unhook_shadow(ip, sip);
					rw_exit(&ip->i_contents);

					*valp = 0;
				} else
					*valp = 1;
				rw_exit(&sip->i_contents);
				rw_exit(&sip->i_rwlock);
				if (ulp) {
					TRANS_END_CSYNC(ufsvfsp, &error,
					    issync, TOP_RMDIR,
					    TOP_RMDIR_SIZE);
				}
				VN_RELE(ITOV(sip));
			} else if (error == ENOENT) {
				*valp = 0;
				error = 0;
			}
		} else {
			error = fs_pathconf(vp, cmd, valp, cr, ct);
		}
		break;

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACLENT_ENABLED;
		break;

	case _PC_MIN_HOLE_SIZE:
		*valp = (ulong_t)ip->i_fs->fs_bsize;
		break;

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
		    (vp->v_type == VREG || vp->v_type == VDIR);
		break;

	case _PC_TIMESTAMP_RESOLUTION:
		/*
		 * UFS keeps only microsecond timestamp resolution.
		 * This is historical and will probably never change.
		 */
		*valp = 1000L;
		break;

	default:
		error = fs_pathconf(vp, cmd, valp, cr, ct);
		break;
	}

	if (ulp != NULL) {
		ufs_lockfs_end(ulp);
	}
	return (error);
}
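
/*
 * For illustration, a user-level caller reaches the cases above through
 * pathconf(2)/fpathconf(2), e.g. (hypothetical snippet):
 *
 *	long bits = fpathconf(fd, _PC_FILESIZEBITS);
 *	long hole = fpathconf(fd, _PC_MIN_HOLE_SIZE);
 *
 * On a largefile-enabled UFS the first reports UFS_FILESIZE_BITS, and the
 * second reports fs_bsize - the granularity at which this file system can
 * usefully report holes to hole-aware callers.
 */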

int ufs_pageio_writes, ufs_pageio_reads;

static int
ufs_pageio(struct vnode *vp, page_t *pp, uoff_t io_off, size_t io_len,
    int flags, struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	page_t *npp = NULL, *opp = NULL, *cpp = pp;
	struct buf *bp;
	daddr_t bn;
	size_t done_len = 0, cur_len = 0;
	int err = 0;
	int contig = 0;
	int dolock;
	int vmpss = 0;
	struct ulockfs *ulp;

	if ((flags & B_READ) && pp != NULL && pp->p_vnode == vp &&
	    vp->v_mpssdata != NULL) {
		vmpss = 1;
	}

	dolock = (rw_owner(&ip->i_contents) != curthread);
	/*
	 * We need a better check.  Ideally, we would use another
	 * vnodeops so that hlocked and forcibly unmounted file
	 * systems would return EIO where appropriate and w/o the
	 * need for these checks.
	 */
	if ((ufsvfsp = ip->i_ufsvfs) == NULL)
		return (EIO);

	/*
	 * For vmpss (pp can be NULL) case respect the quiesce protocol.
	 * ul_lock must be taken before locking pages so we can't use it here
	 * if pp is non NULL because segvn already locked pages
	 * SE_EXCL. Instead we rely on the fact that a forced umount or
	 * applying a filesystem lock via ufs_fiolfs() will block in the
	 * implicit call to ufs_flush() until we unlock the pages after the
	 * return to segvn. Other ufs_quiesce() callers keep ufs_quiesce_pend
	 * above 0 until they are done. We have to be careful not to increment
	 * ul_vnops_cnt here after forceful unmount hlocks the file system.
	 *
	 * If pp is NULL use ul_lock to make sure we don't increment
	 * ul_vnops_cnt after forceful unmount hlocks the file system.
	 */
	if (vmpss || pp == NULL) {
		ulp = &ufsvfsp->vfs_ulockfs;
		if (pp == NULL)
			mutex_enter(&ulp->ul_lock);
		if (ulp->ul_fs_lock & ULOCKFS_GETREAD_MASK) {
			if (pp == NULL) {
				mutex_exit(&ulp->ul_lock);
			}
			return (vmpss ? EIO : EINVAL);
		}
		atomic_inc_ulong(&ulp->ul_vnops_cnt);
		if (pp == NULL)
			mutex_exit(&ulp->ul_lock);
		if (ufs_quiesce_pend) {
			if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
				cv_broadcast(&ulp->ul_cv);
			return (vmpss ? EIO : EINVAL);
		}
	}

	if (dolock) {
		/*
		 * segvn may call fop_pageio() instead of fop_getpage() to
		 * handle a fault against a segment that maps vnode pages with
		 * large mappings.  Segvn creates pages and holds them locked
		 * SE_EXCL during fop_pageio() call. In this case we have to
		 * use rw_tryenter() to avoid a potential deadlock since in
		 * lock order i_contents needs to be taken first.
		 * Segvn will retry via fop_getpage() if fop_pageio() fails.
		 */
		if (!vmpss) {
			rw_enter(&ip->i_contents, RW_READER);
		} else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
			if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
				cv_broadcast(&ulp->ul_cv);
			return (EDEADLK);
		}
	}

	/*
	 * Return an error to segvn because the pagefault request is beyond
	 * PAGESIZE rounded EOF.
	 */
	if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
		if (dolock)
			rw_exit(&ip->i_contents);
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		return (EFAULT);
	}

	if (pp == NULL) {
		if (bmap_has_holes(ip)) {
			err = ENOSYS;
		} else {
			err = EINVAL;
		}
		if (dolock)
			rw_exit(&ip->i_contents);
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		return (err);
	}

	/*
	 * Break the io request into chunks, one for each contiguous
	 * stretch of disk blocks in the target file.
	 */
	while (done_len < io_len) {
		ASSERT(cpp);
		contig = 0;
		if (err = bmap_read(ip, (uoff_t)(io_off + done_len),
		    &bn, &contig))
			break;

		if (bn == UFS_HOLE) {	/* No holey swapfiles */
			if (vmpss) {
				err = EFAULT;
				break;
			}
			err = ufs_fault(ITOV(ip), "ufs_pageio: bn == UFS_HOLE");
			break;
		}

		cur_len = MIN(io_len - done_len, (size_t)contig);
		/*
		 * Zero out a page beyond EOF, when the last block of
		 * a file is a UFS fragment so that ufs_pageio() can be used
		 * instead of ufs_getpage() to handle faults against
		 * segvn segments that use large pages.
		 */
		page_list_break(&cpp, &npp, btopr(cur_len));
		if ((flags & B_READ) && (cur_len & PAGEOFFSET)) {
			size_t xlen = cur_len & PAGEOFFSET;
			pagezero(cpp->p_prev, xlen, PAGESIZE - xlen);
		}

		bp = pageio_setup(cpp, cur_len, ip->i_devvp, flags);
		ASSERT(bp != NULL);

		bp->b_edev = ip->i_dev;
		bp->b_dev = cmpdev(ip->i_dev);
		bp->b_blkno = bn;
		bp->b_un.b_addr = (caddr_t)0;
		bp->b_file = ip->i_vnode;

		ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
		ub.ub_pageios.value.ul++;
		if (ufsvfsp->vfs_snapshot)
			fssnap_strategy(&(ufsvfsp->vfs_snapshot), bp);
		else
			(void) bdev_strategy(bp);

		if (flags & B_READ)
			ufs_pageio_reads++;
		else
			ufs_pageio_writes++;
		if (flags & B_READ)
			lwp_stat_update(LWP_STAT_INBLK, 1);
		else
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		/*
		 * If the request is not B_ASYNC, wait for i/o to complete
		 * and re-assemble the page list to return to the caller.
		 * If it is B_ASYNC we leave the page list in pieces and
		 * cleanup() will dispose of them.
		 */
		if ((flags & B_ASYNC) == 0) {
			err = biowait(bp);
			pageio_done(bp);
			if (err)
				break;
			page_list_concat(&opp, &cpp);
		}
		cpp = npp;
		npp = NULL;
		if (flags & B_READ)
			cur_len = P2ROUNDUP_TYPED(cur_len, PAGESIZE, size_t);
		done_len += cur_len;
	}
	ASSERT(err || (cpp == NULL && npp == NULL && done_len == io_len));
	if (err) {
		if (flags & B_ASYNC) {
			/* Cleanup unprocessed parts of list */
			page_list_concat(&cpp, &npp);
			if (flags & B_READ)
				pvn_read_done(cpp, B_ERROR);
			else
				pvn_write_done(cpp, B_ERROR);
		} else {
			/* Re-assemble list and let caller clean up */
			page_list_concat(&opp, &cpp);
			page_list_concat(&opp, &npp);
		}
	}

	if (vmpss && !(ip->i_flag & IACC) && !ULOCKFS_IS_NOIACC(ulp) &&
	    ufsvfsp->vfs_fs->fs_ronly == 0 && !ufsvfsp->vfs_noatime) {
		mutex_enter(&ip->i_tlock);
		ip->i_flag |= IACC;
		ITIMES_NOLOCK(ip);
		mutex_exit(&ip->i_tlock);
	}

	if (dolock)
		rw_exit(&ip->i_contents);
	if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
		cv_broadcast(&ulp->ul_cv);
	return (err);
}
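
/*
 * Sketch of the chunking loop above (hypothetical layout; assumes 4K
 * pages): for a 24K B_READ request whose blocks are contiguous on disk
 * except for a seek after 16K, bmap_read() yields contig == 16K on the
 * first pass, so page_list_break() splits the list as
 *
 *	[cpp: 4 pages -> one 16K i/o]	[npp: 2 pages, retried next pass]
 *
 * and the second pass issues the remaining 8K as its own buf. done_len
 * advances by the page-rounded cur_len each time around the loop.
 */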

/*
 * Called when the kernel is in a frozen state to dump data
 * directly to the device. It uses a private dump data structure,
 * set up by dump_ctl, to locate the correct disk block to which to dump.
 */
static int
ufs_dump(vnode_t *vp, caddr_t addr, offset_t ldbn, offset_t dblks,
    caller_context_t *ct)
{
	uoff_t file_size;
	struct inode *ip = VTOI(vp);
	struct fs *fs = ip->i_fs;
	daddr_t dbn, lfsbn;
	int disk_blks = fs->fs_bsize >> DEV_BSHIFT;
	int error = 0;
	int ndbs, nfsbs;

	/*
	 * forced unmount case
	 */
	if (ip->i_ufsvfs == NULL)
		return (EIO);
	/*
	 * Validate that the inode has not been modified since
	 * the dump structure was allocated.
	 */
	mutex_enter(&ip->i_tlock);
	if ((dump_info == NULL) ||
	    (dump_info->ip != ip) ||
	    (dump_info->time.tv_sec != ip->i_mtime.tv_sec) ||
	    (dump_info->time.tv_usec != ip->i_mtime.tv_usec)) {
		mutex_exit(&ip->i_tlock);
		return (-1);
	}
	mutex_exit(&ip->i_tlock);

	/*
	 * See that the file has room for this write
	 */
	UFS_GET_ISIZE(&file_size, ip);

	if (ldbtob(ldbn + dblks) > file_size)
		return (ENOSPC);

	/*
	 * Find the physical disk block numbers from the dump
	 * private data structure directly and write out the data
	 * in contiguous block lumps
	 */
	while (dblks > 0 && !error) {
		lfsbn = (daddr_t)lblkno(fs, ldbtob(ldbn));
		dbn = fsbtodb(fs, dump_info->dblk[lfsbn]) + ldbn % disk_blks;
		nfsbs = 1;
		ndbs = disk_blks - ldbn % disk_blks;
		while (ndbs < dblks && fsbtodb(fs, dump_info->dblk[lfsbn +
		    nfsbs]) == dbn + ndbs) {
			nfsbs++;
			ndbs += disk_blks;
		}
		if (ndbs > dblks)
			ndbs = dblks;
		error = bdev_dump(ip->i_dev, addr, dbn, ndbs);
		addr += ldbtob((offset_t)ndbs);
		dblks -= ndbs;
		ldbn += ndbs;
	}
	return (error);
}

/*
 * Prepare the file system before and after the dump operation.
 *
 * action = DUMP_ALLOC:
 * Preparation before dump, allocate dump private data structure
 * to hold all the direct and indirect block info for dump.
 *
 * action = DUMP_FREE:
 * Clean up after dump, deallocate the dump private data structure.
 *
 * action = DUMP_SCAN:
 * Scan dump_info for *blkp DEV_BSIZE blocks of contig fs space;
 * if found, the starting file-relative DEV_BSIZE lbn is written
 * to *blkp; that lbn is intended for use with fop_dump()
 */
static int
ufs_dumpctl(vnode_t *vp, int action, offset_t *blkp, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	ufsvfs_t *ufsvfsp = ip->i_ufsvfs;
	struct fs *fs;
	daddr32_t *dblk, *storeblk;
	daddr32_t *nextblk, *endblk;
	struct buf *bp;
	int i, entry, entries;
	int n, ncontig;

	/*
	 * check for forced unmount
	 */
	if (ufsvfsp == NULL)
		return (EIO);

	if (action == DUMP_ALLOC) {
		/*
		 * alloc and record dump_info
		 */
		if (dump_info != NULL)
			return (EINVAL);

		ASSERT(vp->v_type == VREG);
		fs = ufsvfsp->vfs_fs;

		rw_enter(&ip->i_contents, RW_READER);

		if (bmap_has_holes(ip)) {
			rw_exit(&ip->i_contents);
			return (EFAULT);
		}

		/*
		 * calculate and allocate space needed according to i_size
		 */
		entries = (int)lblkno(fs, blkroundup(fs, ip->i_size));
		dump_info = kmem_alloc(sizeof (struct dump) +
		    (entries - 1) * sizeof (daddr32_t), KM_NOSLEEP);
		if (dump_info == NULL) {
			rw_exit(&ip->i_contents);
			return (ENOMEM);
		}

		/* Start saving the info */
		dump_info->fsbs = entries;
		dump_info->ip = ip;
		storeblk = &dump_info->dblk[0];

		/* Direct Blocks */
		for (entry = 0; entry < NDADDR && entry < entries; entry++)
			*storeblk++ = ip->i_db[entry];

		/* Indirect Blocks */
		for (i = 0; i < NIADDR; i++) {
			int error = 0;

			bp = UFS_BREAD(ufsvfsp,
			    ip->i_dev, fsbtodb(fs, ip->i_ib[i]), fs->fs_bsize);
			if (bp->b_flags & B_ERROR)
				error = EIO;
			else {
				dblk = bp->b_un.b_daddr;
				if ((storeblk = save_dblks(ip, ufsvfsp,
				    storeblk, dblk, i, entries)) == NULL)
					error = EIO;
			}

			brelse(bp);

			if (error != 0) {
				kmem_free(dump_info, sizeof (struct dump) +
				    (entries - 1) * sizeof (daddr32_t));
				rw_exit(&ip->i_contents);
				dump_info = NULL;
				return (error);
			}
		}
		/* and time stamp the information */
		mutex_enter(&ip->i_tlock);
		dump_info->time = ip->i_mtime;
		mutex_exit(&ip->i_tlock);

		rw_exit(&ip->i_contents);
	} else if (action == DUMP_FREE) {
		/*
		 * free dump_info
		 */
		if (dump_info == NULL)
			return (EINVAL);
		entries = dump_info->fsbs - 1;
		kmem_free(dump_info, sizeof (struct dump) +
		    entries * sizeof (daddr32_t));
		dump_info = NULL;
	} else if (action == DUMP_SCAN) {
		/*
		 * scan dump_info
		 */
		if (dump_info == NULL)
			return (EINVAL);

		dblk = dump_info->dblk;
		nextblk = dblk + 1;
		endblk = dblk + dump_info->fsbs - 1;
		fs = ufsvfsp->vfs_fs;
		ncontig = *blkp >> (fs->fs_bshift - DEV_BSHIFT);

		/*
		 * scan dblk[] entries; contig fs space is found when:
		 * ((current blkno + frags per block) == next blkno)
		 */
		n = 0;
		while (n < ncontig && dblk < endblk) {
			if ((*dblk + fs->fs_frag) == *nextblk)
				n++;
			else
				n = 0;
			dblk++;
			nextblk++;
		}

		/*
		 * index is where size bytes of contig space begins;
		 * conversion from index to the file's DEV_BSIZE lbn
		 * is equivalent to:  (index * fs_bsize) / DEV_BSIZE
		 */
		if (n == ncontig) {
			i = (dblk - dump_info->dblk) - ncontig;
			*blkp = i << (fs->fs_bshift - DEV_BSHIFT);
		} else
			return (EFAULT);
	}
	return (0);
}
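
/*
 * Illustrative DUMP_SCAN trace (hypothetical frag numbers, fs_frag == 8):
 * the dblk[] walk above counts a run when *dblk + fs_frag == *nextblk, so
 *
 *	dblk[] = { 640, 648, 656, 720, ... }
 *
 * yields n == 2 after the first three entries (640+8 == 648, 648+8 == 656)
 * and resets to 0 at the 656 -> 720 discontinuity. Once n reaches ncontig,
 * the starting index is converted to a DEV_BSIZE lbn by the
 * fs_bshift - DEV_BSHIFT shift, ready to hand to ufs_dump().
 */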

/*
 * Recursive helper function for ufs_dumpctl().  It follows the indirect file
 * system blocks until it reaches the disk block addresses, which are
 * then stored into the given buffer, storeblk.
 */
static daddr32_t *
save_dblks(struct inode *ip, struct ufsvfs *ufsvfsp, daddr32_t *storeblk,
    daddr32_t *dblk, int level, int entries)
{
	struct fs *fs = ufsvfsp->vfs_fs;
	struct buf *bp;
	int i;

	if (level == 0) {
		for (i = 0; i < NINDIR(fs); i++) {
			if (storeblk - dump_info->dblk >= entries)
				break;
			*storeblk++ = dblk[i];
		}
		return (storeblk);
	}
	for (i = 0; i < NINDIR(fs); i++) {
		if (storeblk - dump_info->dblk >= entries)
			break;
		bp = UFS_BREAD(ufsvfsp,
		    ip->i_dev, fsbtodb(fs, dblk[i]), fs->fs_bsize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return (NULL);
		}
		storeblk = save_dblks(ip, ufsvfsp, storeblk, bp->b_un.b_daddr,
		    level - 1, entries);
		brelse(bp);

		if (storeblk == NULL)
			return (NULL);
	}
	return (storeblk);
}
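
/*
 * Capacity sketch for the recursion above (assumes fs_bsize == 8K and
 * 4-byte daddr32_t entries, so NINDIR(fs) == 2048): level 0 copies up to
 * 2048 leaf addresses; level 1 visits up to 2048 indirect blocks covering
 * 2048^2 leaves; level 2 squares that again. The storeblk/entries bound
 * checked on every iteration is what actually stops the walk at i_size.
 */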

static int
ufs_getsecattr(struct vnode *vp, vsecattr_t *vsap, int flag,
    struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ulockfs *ulp;
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	ulong_t vsa_mask = vsap->vsa_mask;
	int err = EINVAL;

	vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);

	/*
	 * Only grab locks if needed - they're not needed to check vsa_mask
	 * or if the mask contains no acl flags.
	 */
	if (vsa_mask != 0) {
		if (err = ufs_lockfs_begin(ufsvfsp, &ulp,
		    ULOCKFS_GETATTR_MASK))
			return (err);

		rw_enter(&ip->i_contents, RW_READER);
		err = ufs_acl_get(ip, vsap, flag, cr);
		rw_exit(&ip->i_contents);

		if (ulp)
			ufs_lockfs_end(ulp);
	}
	return (err);
}

static int
ufs_setsecattr(struct vnode *vp, vsecattr_t *vsap, int flag, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ulockfs *ulp = NULL;
	struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
	ulong_t vsa_mask = vsap->vsa_mask;
	int err;
	int haverwlock = 1;
	int trans_size;
	int donetrans = 0;
	int retry = 1;

	ASSERT(RW_LOCK_HELD(&ip->i_rwlock));

	/* Abort now if the request is either empty or invalid. */
	vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
	if ((vsa_mask == 0) ||
	    ((vsap->vsa_aclentp == NULL) &&
	    (vsap->vsa_dfaclentp == NULL))) {
		err = EINVAL;
		goto out;
	}

	/*
	 * Following convention, if this is a directory then we acquire the
	 * inode's i_rwlock after starting a UFS logging transaction;
	 * otherwise, we acquire it beforehand. Since we were called (and
	 * must therefore return) with the lock held, we will have to drop it,
	 * and later reacquire it, if operating on a directory.
	 */
	if (vp->v_type == VDIR) {
		rw_exit(&ip->i_rwlock);
		haverwlock = 0;
	} else {
		/* Upgrade the lock if required. */
		if (!rw_write_held(&ip->i_rwlock)) {
			rw_exit(&ip->i_rwlock);
			rw_enter(&ip->i_rwlock, RW_WRITER);
		}
	}

again:
	ASSERT(!(vp->v_type == VDIR && haverwlock));
	if (err = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK)) {
		ulp = NULL;
		retry = 0;
		goto out;
	}

	/*
	 * Check that the file system supports this operation. Note that
	 * ufs_lockfs_begin() will have checked that the file system had
	 * not been forcibly unmounted.
	 */
	if (ufsvfsp->vfs_fs->fs_ronly) {
		err = EROFS;
		goto out;
	}
	if (ufsvfsp->vfs_nosetsec) {
		err = ENOSYS;
		goto out;
	}

	if (ulp) {
		TRANS_BEGIN_ASYNC(ufsvfsp, TOP_SETSECATTR,
		    trans_size = TOP_SETSECATTR_SIZE(VTOI(vp)));
		donetrans = 1;
	}

	if (vp->v_type == VDIR) {
		rw_enter(&ip->i_rwlock, RW_WRITER);
		haverwlock = 1;
	}

	ASSERT(haverwlock);

	/* Do the actual work. */
	rw_enter(&ip->i_contents, RW_WRITER);
	/*
	 * Suppress out of inodes messages if we will retry.
	 */
	if (retry)
		ip->i_flag |= IQUIET;
	err = ufs_acl_set(ip, vsap, flag, cr);
	ip->i_flag &= ~IQUIET;
	rw_exit(&ip->i_contents);

out:
	if (ulp) {
		if (donetrans) {
			/*
			 * top_end_async() can eventually call
			 * top_end_sync(), which can block. We must
			 * therefore observe the lock-ordering protocol
			 * here as well.
			 */
			if (vp->v_type == VDIR) {
				rw_exit(&ip->i_rwlock);
				haverwlock = 0;
			}
			TRANS_END_ASYNC(ufsvfsp, TOP_SETSECATTR, trans_size);
		}
		ufs_lockfs_end(ulp);
	}
	/*
	 * If no inodes available, try scaring a logically-
	 * free one out of the delete queue to someplace
	 * that we can find it.
	 */
	if ((err == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		if (vp->v_type == VDIR && haverwlock) {
			rw_exit(&ip->i_rwlock);
			haverwlock = 0;
		}
		goto again;
	}
	/*
	 * If we need to reacquire the lock then it is safe to do so
	 * as a reader. This is because ufs_rwunlock(), which will be
	 * called by our caller after we return, does not differentiate
	 * between shared and exclusive locks.
	 */
	if (!haverwlock) {
		ASSERT(vp->v_type == VDIR);
		rw_enter(&ip->i_rwlock, RW_READER);
	}

	return (err);
}

/*
 * Locate the vnode to be used for an event notification. As this will
 * be called prior to the name space change, perform basic verification
 * that the change will be allowed.
 */
static int
ufs_eventlookup(struct vnode *dvp, char *nm, struct cred *cr,
    struct vnode **vpp)
{
	int namlen;
	int error;
	struct vnode *vp;
	struct inode *ip;
	struct inode *xip;
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp;

	ip = VTOI(dvp);
	*vpp = NULL;

	if ((namlen = strlen(nm)) == 0)
		return (EINVAL);

	if (nm[0] == '.') {
		if (namlen == 1)
			return (EINVAL);
		else if ((namlen == 2) && nm[1] == '.') {
			return (EEXIST);
		}
	}

	/*
	 * Check accessibility and write access of parent directory as we
	 * only want to post the event if we're able to make a change.
	 */
	if (error = ufs_diraccess(ip, IEXEC|IWRITE, cr))
		return (error);

	if (vp = dnlc_lookup(dvp, nm)) {
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			return (ENOENT);
		}

		*vpp = vp;
		return (0);
	}

	/*
	 * Keep the idle queue from getting too long by idling two
	 * inodes before attempting to allocate another.
	 * This operation must be performed before entering lockfs
	 * or a transaction.
	 */
	if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
		if ((curthread->t_flag & T_DONTBLOCK) == 0) {
			ins.in_lidles.value.ul += ufs_lookup_idle_count;
			ufs_idle_some(ufs_lookup_idle_count);
		}

	ufsvfsp = ip->i_ufsvfs;

retry_lookup:
	if (error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK))
		return (error);

	if ((error = ufs_dirlook(ip, nm, &xip, cr, 1, 1)) == 0) {
		vp = ITOV(xip);
		*vpp = vp;
	}

	if (ulp) {
		ufs_lockfs_end(ulp);
	}

	if (error == EAGAIN)