/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/taskq.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <vm/seg_vn.h>
#include <vm/seg_kpm.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done avoiding races using ZFS_ENTER(zfsvfs).
 *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	VN_RELE() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *	This is critical because we don't want to block while holding locks.
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *	use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		VN_RELE(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	VN_RELE(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */
/* ARGSUSED */
static int
zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(*vpp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & FAPPEND) == 0)) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (fs_vscan(*vpp, cr, 0) != 0) {
			ZFS_EXIT(zfsvfs);
			return (EACCES);
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & (FSYNC | FDSYNC))
		atomic_inc_32(&zp->z_sync_cnt);

	ZFS_EXIT(zfsvfs);
	return (0);
}
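/*
 * Context note (an addition, not from the original header): z_sync_cnt
 * tracks how many FSYNC/FDSYNC opens are outstanding; while it is
 * nonzero, the log records generated for this file are treated as
 * synchronous when the ZIL builds its itxs (see zfs_log_write()).
 */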
/* ARGSUSED */
static int
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	/*
	 * Clean up any locks held by this process on the vp.
	 */
	cleanlocks(vp, ddi_get_pid(), 0);
	cleanshares(vp, ddi_get_pid());

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Decrement the synchronous opens in the znode */
	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(fs_vscan(vp, cr, 1) == 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey(vnode_t *vp, int cmd, offset_t *off)
{
	znode_t	*zp = VTOZ(vp);
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (ENXIO);
	}

	if (cmd == _FIO_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);

	/* end of file? */
	if ((error == ESRCH) || (noff > file_sz)) {
		/*
		 * Handle the virtual hole at the end of file.
		 */
		if (hole) {
			*off = file_sz;
			return (0);
		}
		return (ENXIO);
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}
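/*
 * A minimal userland sketch of how zfs_holey() surfaces through
 * lseek(2); the path and the error handling are illustrative
 * assumptions, not part of this file:
 *
 *	int fd = open("/tank/fs/sparse", O_RDONLY);
 *	off_t data = lseek(fd, (off_t)0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *	// hole lands on the virtual hole at EOF when the
 *	// remainder of the file is unbroken data
 */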
/* ARGSUSED */
static int
zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
    int *rvalp, caller_context_t *ct)
{
	offset_t off;
	int error;
	zfsvfs_t *zfsvfs;
	znode_t *zp;

	switch (com) {
	case _FIOFFS:
		return (zfs_sync(vp->v_vfsp, 0, cred));

		/*
		 * The following two ioctls are used by bfu.  Faking out,
		 * necessary to avoid bfu errors.
		 */
	case _FIOGDIO:
	case _FIOSDIO:
		return (0);

	case _FIO_SEEK_DATA:
	case _FIO_SEEK_HOLE:
		if (ddi_copyin((void *)data, &off, sizeof (off), flag))
			return (EFAULT);

		zp = VTOZ(vp);
		zfsvfs = zp->z_zfsvfs;
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);

		/* offset parameter is in/out */
		error = zfs_holey(vp, com, &off);
		ZFS_EXIT(zfsvfs);
		if (error)
			return (error);
		if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
			return (EFAULT);
		return (0);
	}
	return (ENOTTY);
}
/*
 * Utility functions to map and unmap a single physical page.  These
 * are used to manage the mappable copies of ZFS file data, and therefore
 * do not update ref/mod bits.
 */
caddr_t
zfs_map_page(page_t *pp, enum seg_rw rw)
{
	if (kpm_enable)
		return (hat_kpm_mapin(pp, 0));
	ASSERT(rw == S_READ || rw == S_WRITE);
	return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0),
	    (caddr_t)-1));
}

void
zfs_unmap_page(page_t *pp, caddr_t addr)
{
	if (kpm_enable) {
		hat_kpm_mapout(pp, 0, addr);
	} else {
		ppmapout(addr);
	}
}
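/*
 * Added note: with seg_kpm available (kpm_enable), every physical page
 * already has a permanent kernel mapping, so zfs_map_page() is nearly
 * free; the ppmapin()/ppmapout() path instead builds and tears down a
 * transient mapping on every call.
 */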
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
static void
update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
{
	int64_t	off;

	off = start & PAGEOFFSET;
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
		page_t *pp;
		uint64_t nbytes = MIN(PAGESIZE - off, len);

		if (pp = page_lookup(vp, start, SE_SHARED)) {
			caddr_t va;

			va = zfs_map_page(pp, S_WRITE);
			(void) dmu_read(os, oid, start+off, nbytes, va+off,
			    DMU_READ_PREFETCH);
			zfs_unmap_page(pp, va);
			page_unlock(pp);
		}
		len -= nbytes;
		off = 0;
	}
}
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we default from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	the file is memory mapped.
 */
static int
mappedread(vnode_t *vp, int nbytes, uio_t *uio)
{
	znode_t *zp = VTOZ(vp);
	objset_t *os = zp->z_zfsvfs->z_os;
	int64_t	start, off;
	int len = nbytes;
	int error = 0;

	start = uio->uio_loffset;
	off = start & PAGEOFFSET;
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
		page_t *pp;
		uint64_t bytes = MIN(PAGESIZE - off, len);

		if (pp = page_lookup(vp, start, SE_SHARED)) {
			caddr_t va;

			va = zfs_map_page(pp, S_READ);
			error = uiomove(va + off, bytes, UIO_READ, uio);
			zfs_unmap_page(pp, va);
			page_unlock(pp);
		} else {
			error = dmu_read_uio(os, zp->z_id, uio, bytes);
		}
		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}
offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
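/*
 * Sketch (an assumption based on the usual /etc/system mechanism for
 * module tunables, not something this file defines): the chunk size
 * could be halved without a rebuild via
 *
 *	set zfs:zfs_read_chunk_size = 0x80000
 */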
/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	vp	- vnode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Side Effects:
 *	vp - atime updated if byte count > 0
 */
/* ARGSUSED */
static int
zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	objset_t	*os;
	ssize_t		n, nbytes;
	int		error;
	rl_t		*rl;
	xuio_t		*xuio = NULL;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	os = zfsvfs->z_os;

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (EACCES);
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/*
	 * Check for mandatory locks
	 */
	if (MANDMODE(zp->z_mode)) {
		if (error = chklock(vp, FREAD,
		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 */
	if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int nblk;
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
		if (ISP2(blksz)) {
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			    blksz)) / blksz;
		} else {
			ASSERT(offset + n <= blksz);
			nblk = 1;
		}
		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(vp)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
				    blksz), 0, blksz);
			}
		}
	}

	while (n > 0) {
		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		if (vn_has_cached_data(vp))
			error = mappedread(vp, nbytes, uio);
		else
			error = dmu_read_uio(os, zp->z_id, uio, nbytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}

		n -= nbytes;
	}
out:
	zfs_range_unlock(rl);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	ZFS_EXIT(zfsvfs);
	return (error);
}
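/*
 * Worked example of the chunking above (illustrative numbers): with
 * zfs_read_chunk_size = 1M, a 3M read starting at offset 1.5M issues
 * chunks of 0.5M, 1M, 1M and 0.5M, since P2PHASE() trims each chunk to
 * end on a zfs_read_chunk_size boundary.
 */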
/*
 * Write the bytes to a file.
 *
 *	IN:	vp	- vnode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND flag set if in append mode.
 *		cr	- credentials of caller.
 *		ct	- caller context (NFS/CIFS fem monitor only)
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - ctime|mtime updated if byte count > 0
 */

/* ARGSUSED */
static int
zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	rlim64_t	limit = uio->uio_llimit;
	ssize_t		start_resid = uio->uio_resid;
	ssize_t		tx_bytes;
	uint64_t	end_size;
	dmu_tx_t	*tx;
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	zilog_t		*zilog;
	offset_t	woff;
	ssize_t		n, nbytes;
	rl_t		*rl;
	int		max_blksz = zfsvfs->z_max_blksz;
	int		error;
	arc_buf_t	*abuf;
	iovec_t		*aiov;
	xuio_t		*xuio = NULL;
	int		i_iov = 0;
	int		iovcnt = uio->uio_iovcnt;
	iovec_t		*iovp = uio->uio_iov;
	int		write_eof;
	int		count = 0;
	sa_bulk_attr_t	bulk[4];
	uint64_t	mtime[2], ctime[2];

	/*
	 * Fasttrack empty write
	 */
	n = start_resid;
	if (n == 0)
		return (0);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	zilog = zfsvfs->z_log;

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	/*
	 * Check for mandatory locks before calling zfs_range_lock()
	 * in order to prevent a deadlock with locks set via fcntl().
	 */
	if (MANDMODE((mode_t)zp->z_mode) &&
	    (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
		uio_prefaultpages(MIN(n, max_blksz), uio);

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
		woff = rl->r_off;
		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
	}

	if (woff >= limit) {
		zfs_range_unlock(rl);
		ZFS_EXIT(zfsvfs);
		return (EFBIG);
	}

	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		abuf = NULL;
		woff = uio->uio_loffset;
again:
		if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
		    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			error = EDQUOT;
			break;
		}

		if (xuio && abuf == NULL) {
			ASSERT(i_iov < iovcnt);
			aiov = &iovp[i_iov];
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			DTRACE_PROBE3(zfs_cp_write, int, i_iov,
			    iovec_t *, aiov, arc_buf_t *, abuf);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
			i_iov++;
		} else if (abuf == NULL && n >= max_blksz &&
		    woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if (error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes)) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto again;
			}
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zfsvfs->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && vn_has_cached_data(vp)) {
			update_pages(vp, woff,
			    tx_bytes, zfsvfs->z_os, zp->z_id);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
			uint64_t newmode;

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;

		if (!xuio && n > 0)
			uio_prefaultpages(MIN(n, max_blksz), uio);
	}

	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (FSYNC | FDSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	ZFS_EXIT(zfsvfs);
	return (0);
}
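/*
 * Worked example of the chunked-write loop above (illustrative
 * numbers): with max_blksz = 128K, a block-aligned 512K write runs
 * four loop iterations, each with its own tx and TX_WRITE record.
 * Should the third tx fail, the first 256K stay written and, outside
 * replay, the VOP still returns success as a partial write.
 */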
void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;
	objset_t *os = zp->z_zfsvfs->z_os;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

#ifdef DEBUG
static int zil_fault_io = 0;
#endif
/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (ENOENT);
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		VN_RELE_ASYNC(ZTOV(zp),
		    dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
		return (ENOENT);
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zfsvfs->z_log;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = ENOENT;
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;

			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
			    RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_range_unlock(zgd->zgd_rl);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = ENOENT;
#ifdef DEBUG
		if (zil_fault_io) {
			error = EIO;
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= zp->z_blksz);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
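/*
 * Sketch of the two write flavors handled above (sizes illustrative):
 * an immediate write arrives here with buf != NULL and the data is
 * copied into the TX_WRITE record itself; an indirect write instead
 * dmu_sync()s the block and logs only the resulting block pointer, so
 * large data is written once and merely referenced from the log.
 */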
/*ARGSUSED*/
static int
zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * If vnode is for a device return a specfs vnode instead.
 */
static int
specvp_check(vnode_t **vpp, cred_t *cr)
{
	int error = 0;

	if (IS_DEVVP(*vpp)) {
		struct vnode *svp;

		svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
		VN_RELE(*vpp);
		if (svp == NULL)
			error = ENOSYS;
		*vpp = svp;
	}
	return (error);
}
/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held vnode reference for it.
 *
 *	IN:	dvp	- vnode of directory to search.
 *		nm	- name of entry to lookup.
 *		pnp	- full pathname to lookup [UNUSED].
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		rdir	- root directory vnode [UNUSED].
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		direntflags - directory lookup flags
 *		realpnp - returned pathname.
 *
 *	OUT:	vpp	- vnode of located entry, NULL if not found.
 *
 *	RETURN:	0 if success
 *		error code if failure
 */
/* ARGSUSED */
static int
zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	znode_t *zdp = VTOZ(dvp);
	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
	int	error = 0;

	/* fast path */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (dvp->v_type != VDIR) {
			return (ENOTDIR);
		} else if (zdp->z_sa_hdl == NULL) {
			return (EIO);
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*vpp = dvp;
				VN_HOLD(*vpp);
				return (0);
			}
			return (error);
		} else {
			vnode_t *tvp = dnlc_lookup(dvp, nm);

			if (tvp) {
				error = zfs_fastaccesschk_execute(zdp, cr);
				if (error) {
					VN_RELE(tvp);
					return (error);
				}
				if (tvp == DNLC_NO_VNODE) {
					VN_RELE(tvp);
					return (ENOENT);
				} else {
					*vpp = tvp;
					return (specvp_check(vpp, cr));
				}
			}
		}
	}

	DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zdp);

	*vpp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * If the xattr property is off, refuse the lookup request.
		 */
		if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
			ZFS_EXIT(zfsvfs);
			return (EINVAL);
		}

		/*
		 * We don't allow recursive attributes..
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zfsvfs);
			return (EINVAL);
		}

		if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
		    B_FALSE, cr)) {
			VN_RELE(*vpp);
			*vpp = NULL;
		}

		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (dvp->v_type != VDIR) {
		ZFS_EXIT(zfsvfs);
		return (ENOTDIR);
	}

	/*
	 * Check accessibility of directory.
	 */
	if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (EILSEQ);
	}

	error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
	if (error == 0)
		error = specvp_check(vpp, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the vp of the created or trunc'd file.
 *
 *	IN:	dvp	- vnode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		ct	- caller context
 *		vsecp	- ACL to be set
 *
 *	OUT:	vpp	- vnode of created or trunc'd entry.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dvp - ctime|mtime updated if new entry created
 *	 vp - ctime|mtime always, atime if new
 */

/* ARGSUSED */
static int
zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl,
    int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
    vsecattr_t *vsecp)
{
	znode_t		*zp, *dzp = VTOZ(dvp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	objset_t	*os;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	ksid_t		*ksid;
	uid_t		uid;
	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	ksid = crgetsid(cr, KSID_OWNER);
	if (ksid)
		uid = ksid_getid(ksid);
	else
		uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || (vap->va_mask & AT_XVATTR) ||
	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (EINVAL);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (EILSEQ);
	}

	if (vap->va_mask & AT_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_type)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}
top:
	*vpp = NULL;

	if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr))
		vap->va_mode &= ~VSVTX;

	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		VN_HOLD(dvp);
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible VN_HOLD(zp) */
		int zflg = 0;

		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = EISDIR;
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if (zp == NULL) {
		uint64_t txtype;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */
		if ((dzp->z_pflags & ZFS_XATTR) &&
		    (vap->va_type != VREG)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = EINVAL;
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
			zfs_acl_ids_free(&acl_ids);
			error = EDQUOT;
			goto out;
		}

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zfsvfs->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zfsvfs, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zfsvfs->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		if (fuid_dirtied)
			zfs_fuid_sync(zfsvfs, tx);

		(void) zfs_link_create(dl, zp, tx, ZNEW);
		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_commit(tx);
	} else {
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		have_acl = B_FALSE;

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl == EXCL) {
			error = EEXIST;
			goto out;
		}
		/*
		 * Can't open a directory for writing.
		 */
		if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
			error = EISDIR;
			goto out;
		}
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		dzp->z_seq++;
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if ((ZTOV(zp)->v_type == VREG) &&
		    (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			zfs_dirent_unlock(dl);
			dl = NULL;
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
			if (error == 0) {
				vnevent_create(ZTOV(zp), ct);
			}
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			VN_RELE(ZTOV(zp));
	} else {
		*vpp = ZTOV(zp);
		error = specvp_check(vpp, cr);
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Remove an entry from a directory.
 *
 *	IN:	dvp	- vnode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dvp - ctime|mtime
 *	 vp - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;

/*ARGSUSED*/
static int
zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
    int flags)
{
	znode_t		*zp, *dzp = VTOZ(dvp);
	znode_t		*xzp;
	vnode_t		*vp;
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	uint64_t	acl_obj, xattr_obj;
	uint64_t	xattr_obj_unlinked = 0;
	uint64_t	obj = 0;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	boolean_t	may_delete_now, delete_now = FALSE;
	boolean_t	unlinked, toobig = FALSE;
	uint64_t	txtype;
	pathname_t	*realnmp = NULL;
	pathname_t	realnm;
	int		error;
	int		zflg = ZEXISTS;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE) {
		zflg |= ZCILOOK;
		pn_alloc(&realnm);
		realnmp = &realnm;
	}

top:
	xattr_obj = 0;
	xzp = NULL;
	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, realnmp)) {
		if (realnmp)
			pn_free(realnmp);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	vp = ZTOV(zp);

	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}

	vnevent_remove(vp, dvp, name, ct);

	if (realnmp)
		dnlc_remove(dvp, realnmp->pn_buf);
	else
		dnlc_remove(dvp, name);

	mutex_enter(&vp->v_lock);
	may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp);
	mutex_exit(&vp->v_lock);

	/*
	 * We may delete the znode now, or we may put it in the unlinked set;
	 * it depends on whether we're the last link, and on whether there are
	 * other holds on the vnode.  So we dmu_tx_hold() the right things to
	 * allow for either case.
	 */
	obj = zp->z_id;
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	if (may_delete_now) {
		toobig =
		    zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT;
		/* if the file is too big, only hold_free a token amount */
		dmu_tx_hold_free(tx, zp->z_id, 0,
		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
	}

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT3U(error, ==, 0);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	mutex_enter(&zp->z_lock);
	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
	mutex_exit(&zp->z_lock);

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		VN_RELE(vp);
		if (xzp)
			VN_RELE(ZTOV(xzp));
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		if (realnmp)
			pn_free(realnmp);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed.  Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		mutex_enter(&vp->v_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		delete_now = may_delete_now && !toobig &&
		    vp->v_count == 1 && !vn_has_cached_data(vp) &&
		    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
		    acl_obj;
		mutex_exit(&vp->v_lock);
	}

	if (delete_now) {
		if (xattr_obj_unlinked) {
			ASSERT3U(xzp->z_links, ==, 2);
			mutex_enter(&xzp->z_lock);
			xzp->z_unlinked = 1;
			xzp->z_links = 0;
			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
			    &xzp->z_links, sizeof (xzp->z_links), tx);
			ASSERT3U(error, ==, 0);
			mutex_exit(&xzp->z_lock);
			zfs_unlinked_add(xzp, tx);

			if (zp->z_is_sa)
				error = sa_remove(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), tx);
			else
				error = sa_update(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
				    sizeof (uint64_t), tx);
			ASSERT3U(error, ==, 0);
		}
		mutex_enter(&vp->v_lock);
		vp->v_count--;
		ASSERT3U(vp->v_count, ==, 0);
		mutex_exit(&vp->v_lock);
		mutex_exit(&zp->z_lock);
		zfs_znode_delete(zp, tx);
	} else if (unlinked) {
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);
	}

	txtype = TX_REMOVE;
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

	dmu_tx_commit(tx);
out:
	if (realnmp)
		pn_free(realnmp);

	zfs_dirent_unlock(dl);

	if (!delete_now)
		VN_RELE(vp);
	if (xzp)
		VN_RELE(ZTOV(xzp));

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Create a new directory and insert it into dvp using the name
 * provided.  Return a pointer to the inserted directory.
 *
 *	IN:	dvp	- vnode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		vsecp	- ACL to be set
 *
 *	OUT:	vpp	- vnode of created directory.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dvp - ctime|mtime updated
 *	 vp - ctime|mtime|atime updated
 */
/*ARGSUSED*/
static int
zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
    caller_context_t *ct, int flags, vsecattr_t *vsecp)
{
	znode_t		*zp, *dzp = VTOZ(dvp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	uint64_t	txtype;
	dmu_tx_t	*tx;
	int		error;
	int		zf = ZNEW;
	ksid_t		*ksid;
	uid_t		uid;
	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;

	ASSERT(vap->va_type == VDIR);

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	ksid = crgetsid(cr, KSID_OWNER);
	if (ksid)
		uid = ksid_getid(ksid);
	else
		uid = crgetuid(cr);
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || (vap->va_mask & AT_XVATTR) ||
	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (EINVAL);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	if (zfsvfs->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (EILSEQ);
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	if (vap->va_mask & AT_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_type)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 * to be confused.
	 */
top:
	*vpp = NULL;

	if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
	    NULL, NULL)) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (EDQUOT);
	}

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	/*
	 * Now put new name in parent dir.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	*vpp = ZTOV(zp);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dvp	- vnode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- vnode of current working directory.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dvp - ctime|mtime updated
 */
/*ARGSUSED*/
static int
zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	znode_t		*dzp = VTOZ(dvp);
	znode_t		*zp;
	vnode_t		*vp;
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	int		zflg = ZEXISTS;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;
top:
	zp = NULL;

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, NULL)) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	vp = ZTOV(zp);

	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
		goto out;
	}

	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}

	if (vp == cwd) {
		error = EINVAL;
		goto out;
	}

	vnevent_rmdir(vp, dvp, name, ct);

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);
		VN_RELE(vp);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
	}

	dmu_tx_commit(tx);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);
out:
	zfs_dirent_unlock(dl);

	VN_RELE(vp);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
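/*
 * Worked example for the directory-cookie layout described in the
 * zfs_readdir() comment below (values follow from that comment):
 * cookies 0 and 1 name '.' and '..', cookie 2 names '.zfs' at the
 * root, and every real entry is returned at a serialized zap cursor
 * value whose low 4 bits are zero, so the two ranges never collide.
 */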
/*
 * Read as many directory entries as will fit into the provided
 * buffer from the given directory cursor position (specified in
 * the uio structure).
 *
 *	IN:	vp	- vnode of directory to read.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *		eofp	- set to true if end-of-file detected.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap are always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
/* ARGSUSED */
static int
zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
    caller_context_t *ct, int flags)
{
	znode_t		*zp = VTOZ(vp);
	iovec_t		*iovp;
	edirent_t	*eodp;
	dirent64_t	*odp;
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	objset_t	*os;
	caddr_t		outbuf;
	size_t		bufsize;
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	uint_t		bytes_wanted;
	uint64_t	offset; /* must be unsigned; checks for < 1 */
	uint64_t	parent;
	int		local_eof;
	int		outcount;
	int		error;
	uint8_t		prefetch;
	boolean_t	check_sysattrs;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent))) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * If we are not given an eof variable,
	 * use a local one.
	 */
	if (eofp == NULL)
		eofp = &local_eof;

	/*
	 * Check for valid iov_len.
	 */
	if (uio->uio_iov->iov_len <= 0) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	/*
	 * Quit if directory has been removed (posix)
	 */
	if ((*eofp = zp->z_unlinked) != 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	error = 0;
	os = zfsvfs->z_os;
	offset = uio->uio_loffset;
	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */
	if (offset <= 3) {
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
	} else {
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
	}

	/*
	 * Get space to change directory entries into fs independent format.
	 */
	iovp = uio->uio_iov;
	bytes_wanted = iovp->iov_len;
	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
		bufsize = bytes_wanted;
		outbuf = kmem_alloc(bufsize, KM_SLEEP);
		odp = (struct dirent64 *)outbuf;
	} else {
		bufsize = bytes_wanted;
		odp = (struct dirent64 *)iovp->iov_base;
	}
	eodp = (struct edirent *)odp;

	/*
	 * If this VFS supports the system attribute view interface; and
	 * we're looking at an extended attribute directory; and we care
	 * about normalization conflicts on this vfs; then we must check
	 * for normalization conflicts with the sysattr name space.
	 */
	check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
	    (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
	    (flags & V_RDDIR_ENTFLAGS);

	/*
	 * Transform to file-system independent format
	 */
	outcount = 0;
	while (outcount < bytes_wanted) {
		ino64_t objnum;
		ushort_t reclen;
		off64_t *next = NULL;

		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (offset == 0) {
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;
			objnum = zp->z_id;
		} else if (offset == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;
			objnum = parent;
		} else if (offset == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
		} else {
			/*
			 * Grab next entry.
			 */
			if (error = zap_cursor_retrieve(&zc, &zap)) {
				if ((*eofp = (error == ENOENT)) != 0)
					break;
				else
					goto update;
			}

			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers != 1) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)offset);
				error = ENXIO;
				goto update;
			}

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
			/*
			 * MacOS X can extract the object type here such as:
			 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
			 */

			if (check_sysattrs && !zap.za_normalization_conflict) {
				zap.za_normalization_conflict =
				    xattr_sysattr_casechk(zap.za_name);
			}
		}

		if (flags & V_RDDIR_ACCFILTER) {
			/*
			 * If we have no access at all, don't include
			 * this entry in the returned information
			 */
			znode_t	*ezp;

			if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
				goto skip_entry;
			if (!zfs_has_access(ezp, cr)) {
				VN_RELE(ZTOV(ezp));
				goto skip_entry;
			}
			VN_RELE(ZTOV(ezp));
		}

		if (flags & V_RDDIR_ENTFLAGS)
			reclen = EDIRENT_RECLEN(strlen(zap.za_name));
		else
			reclen = DIRENT64_RECLEN(strlen(zap.za_name));

		/*
		 * Will this entry fit in the buffer?
		 */
		if (outcount + reclen > bufsize) {
			/*
			 * Did we manage to fit anything in the buffer?
			 */
			if (!outcount) {
				error = EINVAL;
				goto update;
			}
			break;
		}
		if (flags & V_RDDIR_ENTFLAGS) {
			/*
			 * Add extended flag entry:
			 */
			eodp->ed_ino = objnum;
			eodp->ed_reclen = reclen;
			/* NOTE: ed_off is the offset for the *next* entry */
			next = &(eodp->ed_off);
			eodp->ed_eflags = zap.za_normalization_conflict ?
			    ED_CASE_CONFLICT : 0;
			(void) strncpy(eodp->ed_name, zap.za_name,
			    EDIRENT_NAMELEN(reclen));
			eodp = (edirent_t *)((intptr_t)eodp + reclen);
		} else {
			/*
			 * Add normal entry:
			 */
			odp->d_ino = objnum;
			odp->d_reclen = reclen;
			/* NOTE: d_off is the offset for the *next* entry */
			next = &(odp->d_off);
			(void) strncpy(odp->d_name, zap.za_name,
			    DIRENT64_NAMELEN(reclen));
			odp = (dirent64_t *)((intptr_t)odp + reclen);
		}
		outcount += reclen;

		ASSERT(outcount <= bufsize);

		/* Prefetch znode */
		if (prefetch)
			dmu_prefetch(os, objnum, 0, 0);

	skip_entry:
		/*
		 * Move to the next entry, fill in the previous offset.
		 */
		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			offset = zap_cursor_serialize(&zc);
		} else {
			offset += 1;
		}
		if (next)
			*next = offset;
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

	if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
		iovp->iov_base += outcount;
		iovp->iov_len -= outcount;
		uio->uio_resid -= outcount;
	} else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
		/*
		 * Reset the pointer.
		 */
		offset = uio->uio_loffset;
	}

update:
	zap_cursor_fini(&zc);
	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
		kmem_free(outbuf, bufsize);

	if (error == ENOENT)
		error = 0;

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);

	uio->uio_loffset = offset;

	ZFS_EXIT(zfsvfs);
	return (error);
}
ulong_t zfs_fsync_sync_cnt = 4;

static int
zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	/*
	 * Regardless of whether this is required for standards conformance,
	 * this is the logical behavior when fsync() is called on a file with
	 * dirty pages.  We use B_ASYNC since the ZIL transactions are already
	 * going to be pushed out as part of the zil_commit().
	 */
	if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
	    (vp->v_type == VREG) && !(IS_SWAPVP(vp)))
		(void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);
	}
	return (0);
}
/*
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *	IN:	vp	- vnode of file.
 *		vap	- va_mask identifies requested attributes.
 *			  If AT_XVATTR set, then optional attrs are requested
 *		flags	- ATTR_NOACLCHECK (CIFS server context)
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	OUT:	vap	- attribute values.
 *
 *	RETURN:	0 (always succeeds)
 */
/* ARGSUSED */
static int
zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int	error = 0;
	uint64_t links;
	uint64_t mtime[2], ctime[2];
	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
	xoptattr_t *xoap = NULL;
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);

	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
	 * Also, if we are the owner don't bother, since owner should
	 * always be allowed to read basic attributes of file.
	 */
	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
	    (vap->va_uid != crgetuid(cr))) {
		if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
		    skipaclchk, cr)) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	/*
	 * Return all attributes.  It's cheaper to provide the answer
	 * than to determine whether we were asked the question.
	 */

	mutex_enter(&zp->z_lock);
	vap->va_type = vp->v_type;
	vap->va_mode = zp->z_mode & MODEMASK;
	vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
	vap->va_nodeid = zp->z_id;
	if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
		links = zp->z_links + 1;
	else
		links = zp->z_links;
	vap->va_nlink = MIN(links, UINT32_MAX);	/* nlink_t limit! */
	vap->va_size = zp->z_size;
	vap->va_rdev = vp->v_rdev;
	vap->va_seq = zp->z_seq;

	/*
	 * Add in any requested optional attributes and the create time.
	 * Also set the corresponding bits in the returned attribute bitmap.
	 */
	if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
			xoap->xoa_archive =
			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
			XVA_SET_RTN(xvap, XAT_ARCHIVE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
			xoap->xoa_readonly =
			    ((zp->z_pflags & ZFS_READONLY) != 0);
			XVA_SET_RTN(xvap, XAT_READONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
			xoap->xoa_system =
			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
			XVA_SET_RTN(xvap, XAT_SYSTEM);
		}

		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
			xoap->xoa_hidden =
			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
			XVA_SET_RTN(xvap, XAT_HIDDEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			xoap->xoa_nounlink =
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
			XVA_SET_RTN(xvap, XAT_NOUNLINK);
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			xoap->xoa_immutable =
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			xoap->xoa_appendonly =
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
			XVA_SET_RTN(xvap, XAT_APPENDONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			xoap->xoa_nodump =
			    ((zp->z_pflags & ZFS_NODUMP) != 0);
			XVA_SET_RTN(xvap, XAT_NODUMP);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
			xoap->xoa_opaque =
			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
			XVA_SET_RTN(xvap, XAT_OPAQUE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			xoap->xoa_av_quarantined =
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			xoap->xoa_av_modified =
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
		    vp->v_type == VREG) {
			zfs_sa_get_scanstamp(zp, xvap);
		}

		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
			uint64_t times[2];

			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
			    times, sizeof (times));
			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
			XVA_SET_RTN(xvap, XAT_CREATETIME);
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_REPARSE);
		}
		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
			xoap->xoa_generation = zp->z_gen;
			XVA_SET_RTN(xvap, XAT_GEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
			xoap->xoa_offline =
			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
			XVA_SET_RTN(xvap, XAT_OFFLINE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
			xoap->xoa_sparse =
			    ((zp->z_pflags & ZFS_SPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_SPARSE);
		}
	}

	ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
	ZFS_TIME_DECODE(&vap->va_ctime, ctime);

	mutex_exit(&zp->z_lock);

	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);

	if (zp->z_blksz == 0) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		vap->va_blksize = zfsvfs->z_max_blksz;
	}

	ZFS_EXIT(zfsvfs);
	return (0);
}
2594 * Set the file attributes to the values contained in the
2597 * IN: vp - vnode of file to be modified.
2598 * vap - new attribute values.
2599 * If AT_XVATTR set, then optional attrs are being set
2600 * flags - ATTR_UTIME set if non-default time values provided.
2601 * - ATTR_NOACLCHECK (CIFS context only).
2602 * cr - credentials of caller.
2603 * ct - caller context
2605 * RETURN: 0 if success
2606 * error code if failure
2609 * vp - ctime updated, mtime updated if size changed.
2613 zfs_setattr(vnode_t
*vp
, vattr_t
*vap
, int flags
, cred_t
*cr
,
2614 caller_context_t
*ct
)
2616 znode_t
*zp
= VTOZ(vp
);
2617 zfsvfs_t
*zfsvfs
= zp
->z_zfsvfs
;
2622 uint_t mask
= vap
->va_mask
;
2626 uint64_t new_uid
, new_gid
;
2628 uint64_t mtime
[2], ctime
[2];
2630 int need_policy
= FALSE
;
2632 zfs_fuid_info_t
*fuidp
= NULL
;
2633 xvattr_t
*xvap
= (xvattr_t
*)vap
; /* vap may be an xvattr_t * */
2636 boolean_t skipaclchk
= (flags
& ATTR_NOACLCHECK
) ? B_TRUE
: B_FALSE
;
2637 boolean_t fuid_dirtied
= B_FALSE
;
2638 sa_bulk_attr_t bulk
[7], xattr_bulk
[7];
2639 int count
= 0, xattr_count
= 0;
2644 if (mask
& AT_NOSET
)
2650 zilog
= zfsvfs
->z_log
;
2653 * Make sure that if we have ephemeral uid/gid or xvattr specified
2654 * that file system is at proper version level
2657 if (zfsvfs
->z_use_fuids
== B_FALSE
&&
2658 (((mask
& AT_UID
) && IS_EPHEMERAL(vap
->va_uid
)) ||
2659 ((mask
& AT_GID
) && IS_EPHEMERAL(vap
->va_gid
)) ||
2660 (mask
& AT_XVATTR
))) {
2665 if (mask
& AT_SIZE
&& vp
->v_type
== VDIR
) {
2670 if (mask
& AT_SIZE
&& vp
->v_type
!= VREG
&& vp
->v_type
!= VFIFO
) {
2676 * If this is an xvattr_t, then get a pointer to the structure of
2677 * optional attributes. If this is NULL, then we have a vattr_t.
2679 xoap
= xva_getxoptattr(xvap
);
2681 xva_init(&tmpxvattr
);
2684 * Immutable files can only alter immutable bit and atime
2686 if ((zp
->z_pflags
& ZFS_IMMUTABLE
) &&
2687 ((mask
& (AT_SIZE
|AT_UID
|AT_GID
|AT_MTIME
|AT_MODE
)) ||
2688 ((mask
& AT_XVATTR
) && XVA_ISSET_REQ(xvap
, XAT_CREATETIME
)))) {
2693 if ((mask
& AT_SIZE
) && (zp
->z_pflags
& ZFS_READONLY
)) {
2699 * Verify timestamps doesn't overflow 32 bits.
2700 * ZFS can handle large timestamps, but 32bit syscalls can't
2701 * handle times greater than 2039. This check should be removed
2702 * once large timestamps are fully supported.
2704 if (mask
& (AT_ATIME
| AT_MTIME
)) {
2705 if (((mask
& AT_ATIME
) && TIMESPEC_OVERFLOW(&vap
->va_atime
)) ||
2706 ((mask
& AT_MTIME
) && TIMESPEC_OVERFLOW(&vap
->va_mtime
))) {
2716 /* Can this be moved to before the top label? */
2717 if (zfsvfs
->z_vfs
->vfs_flag
& VFS_RDONLY
) {
	/*
	 * First validate permissions
	 */
	if (mask & AT_SIZE) {
		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
		if (err) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
		/*
		 * XXX - Note, we are not providing any open
		 * mode flags here (like FNDELAY), so we may
		 * block if there are locks present... this
		 * should be addressed in openat().
		 */
		/* XXX - would it be OK to generate a log record here? */
		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
		if (err) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	if (mask & (AT_ATIME|AT_MTIME) ||
	    ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
		    skipaclchk, cr);
	}
	if (mask & (AT_UID|AT_GID)) {
		int	idmask = (mask & (AT_UID|AT_GID));
		int	take_owner;
		int	take_group;

		/*
		 * NOTE: even if a new mode is being set,
		 * we may clear S_ISUID/S_ISGID bits.
		 */
		if (!(mask & AT_MODE))
			vap->va_mode = zp->z_mode;

		/*
		 * Take ownership or chgrp to group we are a member of
		 */
		take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
		take_group = (mask & AT_GID) &&
		    zfs_groupmember(zfsvfs, vap->va_gid, cr);

		/*
		 * If both AT_UID and AT_GID are set then take_owner and
		 * take_group must both be set in order to allow taking
		 * ownership.
		 *
		 * Otherwise, send the check through secpolicy_vnode_setattr()
		 */
		if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
		    ((idmask == AT_UID) && take_owner) ||
		    ((idmask == AT_GID) && take_group)) {
			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
			    skipaclchk, cr) == 0) {
				/*
				 * Remove setuid/setgid for non-privileged users
				 */
				secpolicy_setid_clear(vap, cr);
				trim_mask = (mask & (AT_UID|AT_GID));
			} else {
				need_policy = TRUE;
			}
		} else {
			need_policy = TRUE;
		}
	}
	mutex_enter(&zp->z_lock);
	oldva.va_mode = zp->z_mode;
	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
	if (mask & AT_XVATTR) {
		/*
		 * Update xvattr mask to include only those attributes
		 * that are actually changing.
		 *
		 * The bits will be restored prior to actually setting
		 * the attributes so the caller thinks they were set.
		 */
		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			if (xoap->xoa_appendonly !=
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
				XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			if (xoap->xoa_nounlink !=
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
				XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			if (xoap->xoa_immutable !=
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
				XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			if (xoap->xoa_nodump !=
			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_NODUMP);
				XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			if (xoap->xoa_av_modified !=
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
				XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			if ((vp->v_type != VREG &&
			    xoap->xoa_av_quarantined) ||
			    xoap->xoa_av_quarantined !=
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
				XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			mutex_exit(&zp->z_lock);
			ZFS_EXIT(zfsvfs);
			return (EPERM);
		}

		if (need_policy == FALSE &&
		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
			need_policy = TRUE;
		}
	}

	mutex_exit(&zp->z_lock);
	if (mask & AT_MODE) {
		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
			err = secpolicy_setid_setsticky_clear(vp, vap,
			    &oldva, cr);
			if (err) {
				ZFS_EXIT(zfsvfs);
				return (err);
			}
			trim_mask |= AT_MODE;
		} else {
			need_policy = TRUE;
		}
	}

	if (need_policy) {
		/*
		 * If trim_mask is set then take ownership
		 * has been granted or write_acl is present and user
		 * has the ability to modify mode.  In that case remove
		 * UID|GID and or MODE from mask so that
		 * secpolicy_vnode_setattr() doesn't revoke it.
		 */
		if (trim_mask) {
			saved_mask = vap->va_mask;
			vap->va_mask &= ~trim_mask;
		}
		err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
		if (err) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}

		if (trim_mask)
			vap->va_mask |= saved_mask;
	}

	/*
	 * secpolicy_vnode_setattr, or take ownership may have
	 * changed va_mask
	 */
	mask = vap->va_mask;
	if ((mask & (AT_UID | AT_GID))) {
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj, sizeof (xattr_obj));

		if (err == 0 && xattr_obj) {
			err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
			if (err)
				goto out2;
		}
		if (mask & AT_UID) {
			new_uid = zfs_fuid_create(zfsvfs,
			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
			if (new_uid != zp->z_uid &&
			    zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
				if (attrzp)
					VN_RELE(ZTOV(attrzp));
				err = EDQUOT;
				goto out2;
			}
		}

		if (mask & AT_GID) {
			new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
			    cr, ZFS_GROUP, &fuidp);
			if (new_gid != zp->z_gid &&
			    zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
				if (attrzp)
					VN_RELE(ZTOV(attrzp));
				err = EDQUOT;
				goto out2;
			}
		}
	}
	tx = dmu_tx_create(zfsvfs->z_os);

	if (mask & AT_MODE) {
		uint64_t pmode = zp->z_mode;
		uint64_t acl_obj;
		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);

		if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
			goto out;

		mutex_enter(&zp->z_lock);
		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
			/*
			 * Are we upgrading ACL from old V0 format
			 * to V1 format?
			 */
			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
			    zfs_znode_acl_version(zp) ==
			    ZFS_ACL_VERSION_INITIAL) {
				dmu_tx_hold_free(tx, acl_obj, 0,
				    DMU_OBJECT_END);
				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
				    0, aclp->z_acl_bytes);
			} else {
				dmu_tx_hold_write(tx, acl_obj, 0,
				    aclp->z_acl_bytes);
			}
		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);
		}
		mutex_exit(&zp->z_lock);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	} else {
		if ((mask & AT_XVATTR) &&
		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		else
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	}

	if (attrzp)
		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);

	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err) {
		if (err == ERESTART)
			dmu_tx_wait(tx);
		goto out;
	}
	/*
	 * Set each attribute requested.
	 * We group settings according to the locks they need to acquire.
	 *
	 * Note: you cannot set ctime directly, although it will be
	 * updated as a side-effect of calling this function.
	 */

	if (mask & (AT_UID|AT_GID|AT_MODE))
		mutex_enter(&zp->z_acl_lock);
	mutex_enter(&zp->z_lock);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (attrzp) {
		if (mask & (AT_UID|AT_GID|AT_MODE))
			mutex_enter(&attrzp->z_acl_lock);
		mutex_enter(&attrzp->z_lock);
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
		    sizeof (attrzp->z_pflags));
	}
	if (mask & (AT_UID|AT_GID)) {

		if (mask & AT_UID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
			    &new_uid, sizeof (new_uid));
			zp->z_uid = new_uid;
			if (attrzp) {
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_UID(zfsvfs), NULL, &new_uid,
				    sizeof (new_uid));
				attrzp->z_uid = new_uid;
			}
		}

		if (mask & AT_GID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
			    NULL, &new_gid, sizeof (new_gid));
			zp->z_gid = new_gid;
			if (attrzp) {
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_GID(zfsvfs), NULL, &new_gid,
				    sizeof (new_gid));
				attrzp->z_gid = new_gid;
			}
		}
		if (!(mask & AT_MODE)) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
			    NULL, &new_mode, sizeof (new_mode));
			new_mode = zp->z_mode;
		}
		err = zfs_acl_chown_setattr(zp);
		ASSERT(err == 0);
		if (attrzp) {
			err = zfs_acl_chown_setattr(attrzp);
			ASSERT(err == 0);
		}
	}

	if (mask & AT_MODE) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
		    &new_mode, sizeof (new_mode));
		zp->z_mode = new_mode;
		ASSERT3U((uintptr_t)aclp, !=, NULL);
		err = zfs_aclset_common(zp, aclp, cr, tx);
		ASSERT3U(err, ==, 0);
		if (zp->z_acl_cached)
			zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = aclp;
		aclp = NULL;
	}
	if (mask & AT_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
		    &zp->z_atime, sizeof (zp->z_atime));
	}

	if (mask & AT_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
		    mtime, sizeof (mtime));
	}

	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
	if (mask & AT_SIZE && !(mask & AT_MTIME)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
		    NULL, mtime, sizeof (mtime));
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);
	} else if (mask != 0) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
		    B_TRUE);
		if (attrzp) {
			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
			    SA_ZPL_CTIME(zfsvfs), NULL,
			    &ctime, sizeof (ctime));
			zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
			    mtime, ctime, B_TRUE);
		}
	}
	/*
	 * Do this after setting timestamps to prevent timestamp
	 * update from toggling bit
	 */
	if (xoap && (mask & AT_XVATTR)) {

		/*
		 * restore trimmed off masks
		 * so that return masks can be set for caller.
		 */
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
			XVA_SET_REQ(xvap, XAT_APPENDONLY);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
			XVA_SET_REQ(xvap, XAT_NOUNLINK);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
			XVA_SET_REQ(xvap, XAT_NODUMP);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			ASSERT(vp->v_type == VREG);

		zfs_xvattr_set(zp, xvap, tx);
	}
	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	if (mask != 0)
		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);

	mutex_exit(&zp->z_lock);
	if (mask & (AT_UID|AT_GID|AT_MODE))
		mutex_exit(&zp->z_acl_lock);

	if (attrzp) {
		if (mask & (AT_UID|AT_GID|AT_MODE))
			mutex_exit(&attrzp->z_acl_lock);
		mutex_exit(&attrzp->z_lock);
	}
out:
	if (err == 0 && attrzp) {
		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
		    xattr_count, tx);
		ASSERT(err2 == 0);
	}

	if (attrzp)
		VN_RELE(ZTOV(attrzp));
	if (aclp)
		zfs_acl_free(aclp);

	if (fuidp) {
		zfs_fuid_info_free(fuidp);
		fuidp = NULL;
	}

	if (err) {
		dmu_tx_abort(tx);
		if (err == ERESTART)
			goto top;
	} else {
		err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		dmu_tx_commit(tx);
	}

out2:
	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (err);
}
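
/*
 * Note on the retry flow above: when dmu_tx_assign(TXG_NOWAIT) cannot
 * get a hold (typically ERESTART while the current txg is being
 * throttled), zfs_setattr() waits in dmu_tx_wait(), aborts the tx
 * under "out:", and branches back to "top:" to revalidate state and
 * rebuild the transaction from scratch.
 */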
typedef struct zfs_zlock {
	krwlock_t	*zl_rwlock;	/* lock we acquired */
	znode_t		*zl_znode;	/* znode we held */
	struct zfs_zlock *zl_next;	/* next in list */
} zfs_zlock_t;
/*
 * Drop locks and release vnodes that were held by zfs_rename_lock().
 */
static void
zfs_rename_unlock(zfs_zlock_t **zlpp)
{
	zfs_zlock_t *zl;

	while ((zl = *zlpp) != NULL) {
		if (zl->zl_znode != NULL)
			VN_RELE(ZTOV(zl->zl_znode));
		rw_exit(zl->zl_rwlock);
		*zlpp = zl->zl_next;
		kmem_free(zl, sizeof (*zl));
	}
}
/*
 * Search back through the directory tree, using the ".." entries.
 * Lock each directory in the chain to prevent concurrent renames.
 * Fail any attempt to move a directory into one of its own descendants.
 * XXX - z_parent_lock can overlap with map or grow locks
 */
static int
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
	zfs_zlock_t	*zl;
	znode_t		*zp = tdzp;
	uint64_t	rootid = zp->z_zfsvfs->z_root;
	uint64_t	oidp = zp->z_id;
	krwlock_t	*rwlp = &szp->z_parent_lock;
	krw_t		rw = RW_WRITER;

	/*
	 * First pass write-locks szp and compares to zp->z_id.
	 * Later passes read-lock zp and compare to zp->z_parent.
	 */
	do {
		if (!rw_tryenter(rwlp, rw)) {
			/*
			 * Another thread is renaming in this path.
			 * Note that if we are a WRITER, we don't have any
			 * parent_locks held yet.
			 */
			if (rw == RW_READER && zp->z_id > szp->z_id) {
				/*
				 * Drop our locks and restart
				 */
				zfs_rename_unlock(&zl);
				*zlpp = NULL;
				zp = tdzp;
				oidp = zp->z_id;
				rwlp = &szp->z_parent_lock;
				rw = RW_WRITER;
				continue;
			} else {
				/*
				 * Wait for other thread to drop its locks
				 */
				rw_enter(rwlp, rw);
			}
		}

		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
		zl->zl_rwlock = rwlp;
		zl->zl_znode = NULL;
		zl->zl_next = *zlpp;
		*zlpp = zl;

		if (oidp == szp->z_id)		/* We're a descendant of szp */
			return (EINVAL);

		if (oidp == rootid)		/* We've hit the top */
			return (0);

		if (rw == RW_READER) {		/* i.e. not the first pass */
			int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
			if (error)
				return (error);
			zl->zl_znode = zp;
		}

		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
		    &oidp, sizeof (oidp));
		rwlp = &zp->z_parent_lock;
		rw = RW_READER;

	} while (zp->z_id != sdzp->z_id);

	return (0);
}
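
/*
 * Illustrative walk: for a request to move /usr/a/b to /usr/a/b/c/d,
 * zfs_rename_lock() starts at the target directory c and follows the
 * ".." entries (c -> b); since b is the source znode, oidp eventually
 * equals szp->z_id and the rename fails with EINVAL before anything
 * is moved.
 */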
/*
 * Move an entry from the provided source directory to the target
 * directory.  Change the entry name as indicated.
 *
 *	IN:	sdvp	- Source directory containing the "old entry".
 *		snm	- Old entry name.
 *		tdvp	- Target directory to contain the "new entry".
 *		tnm	- New entry name.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	sdvp,tdvp - ctime|mtime updated
 */
/*ARGSUSED*/
static int
zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
    caller_context_t *ct, int flags)
{
	znode_t		*tdzp, *szp, *tzp;
	znode_t		*sdzp = VTOZ(sdvp);
	zfsvfs_t	*zfsvfs = sdzp->z_zfsvfs;
	zilog_t		*zilog;
	vnode_t		*realvp;
	zfs_dirlock_t	*sdl, *tdl;
	dmu_tx_t	*tx;
	zfs_zlock_t	*zl;
	int		cmp, serr, terr;
	int		error = 0;
	int		zflg = 0;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(sdzp);
	zilog = zfsvfs->z_log;

	/*
	 * Make sure we have the real vp for the target directory.
	 */
	if (VOP_REALVP(tdvp, &realvp, ct) == 0)
		tdvp = realvp;

	if (tdvp->v_vfsp != sdvp->v_vfsp || zfsctl_is_node(tdvp)) {
		ZFS_EXIT(zfsvfs);
		return (EXDEV);
	}

	tdzp = VTOZ(tdvp);
	ZFS_VERIFY_ZP(tdzp);
	if (zfsvfs->z_utf8 && u8_validate(tnm,
	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (EILSEQ);
	}

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

top:
	szp = NULL;
	tzp = NULL;
	zl = NULL;

	/*
	 * This is to prevent the creation of links into attribute space
	 * by renaming a linked file into/out of an attribute directory.
	 * See the comment in zfs_link() for why this is considered bad.
	 */
	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	/*
	 * Lock source and target directory entries.  To prevent deadlock,
	 * a lock ordering must be defined.  We lock the directory with
	 * the smallest object id first, or if it's a tie, the one with
	 * the lexically first name.
	 */
	if (sdzp->z_id < tdzp->z_id) {
		cmp = -1;
	} else if (sdzp->z_id > tdzp->z_id) {
		cmp = 1;
	} else {
		/*
		 * First compare the two name arguments without
		 * considering any case folding.
		 */
		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);

		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
		ASSERT(error == 0 || !zfsvfs->z_utf8);
		if (cmp == 0) {
			/*
			 * POSIX: "If the old argument and the new argument
			 * both refer to links to the same existing file,
			 * the rename() function shall return successfully
			 * and perform no other action."
			 */
			ZFS_EXIT(zfsvfs);
			return (0);
		}
		/*
		 * If the file system is case-folding, then we may
		 * have some more checking to do.  A case-folding file
		 * system is either supporting mixed case sensitivity
		 * access or is completely case-insensitive.  Note
		 * that the file system is always case preserving.
		 *
		 * In mixed sensitivity mode case sensitive behavior
		 * is the default.  FIGNORECASE must be used to
		 * explicitly request case insensitive behavior.
		 *
		 * If the source and target names provided differ only
		 * by case (e.g., a request to rename 'tim' to 'Tim'),
		 * we will treat this as a special case in the
		 * case-insensitive mode: as long as the source name
		 * is an exact match, we will allow this to proceed as
		 * a name-change request.
		 */
		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
		    flags & FIGNORECASE)) &&
		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
		    &error) == 0) {
			/*
			 * case preserving rename request, require exact
			 * name matches
			 */
			zflg |= ZCIEXACT;
			zflg &= ~ZCILOOK;
		}
	}

	/*
	 * If the source and destination directories are the same, we should
	 * grab the z_name_lock of that directory only once.
	 */
	if (sdzp == tdzp) {
		zflg |= ZHAVELOCK;
		rw_enter(&sdzp->z_name_lock, RW_READER);
	}

	if (cmp < 0) {
		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
		    ZEXISTS | zflg, NULL, NULL);
		terr = zfs_dirent_lock(&tdl,
		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
	} else {
		terr = zfs_dirent_lock(&tdl,
		    tdzp, tnm, &tzp, zflg, NULL, NULL);
		serr = zfs_dirent_lock(&sdl,
		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
		    NULL, NULL);
	}

	if (serr) {
		/*
		 * Source entry invalid or not there.
		 */
		if (!terr) {
			zfs_dirent_unlock(tdl);
			if (tzp)
				VN_RELE(ZTOV(tzp));
		}

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		if (strcmp(snm, "..") == 0)
			serr = EINVAL;
		ZFS_EXIT(zfsvfs);
		return (serr);
	}
	if (terr) {
		zfs_dirent_unlock(sdl);
		VN_RELE(ZTOV(szp));

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		if (strcmp(tnm, "..") == 0)
			terr = EINVAL;
		ZFS_EXIT(zfsvfs);
		return (terr);
	}

	/*
	 * Must have write access at the source to remove the old entry
	 * and write access at the target to create the new entry.
	 * Note that if target and source are the same, this can be
	 * done in a single check.
	 */
	if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
		goto out;

	if (ZTOV(szp)->v_type == VDIR) {
		/*
		 * Check to make sure rename is valid.
		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
		 */
		if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
			goto out;
	}

	/*
	 * Does target exist?
	 */
	if (tzp) {
		/*
		 * Source and target must be the same type.
		 */
		if (ZTOV(szp)->v_type == VDIR) {
			if (ZTOV(tzp)->v_type != VDIR) {
				error = ENOTDIR;
				goto out;
			}
		} else {
			if (ZTOV(tzp)->v_type == VDIR) {
				error = EISDIR;
				goto out;
			}
		}
		/*
		 * POSIX dictates that when the source and target
		 * entries refer to the same file object, rename
		 * must do nothing and exit without error.
		 */
		if (szp->z_id == tzp->z_id) {
			error = 0;
			goto out;
		}
	}

	vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
	if (tzp)
		vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);

	/*
	 * notify the target directory if it is not the same
	 * as source directory.
	 */
	if (tdvp != sdvp) {
		vnevent_rename_dest_dir(tdvp, ct);
	}

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
	if (sdzp != tdzp) {
		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, tdzp);
	}
	if (tzp) {
		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, tzp);
	}

	zfs_sa_upgrade_txholds(tx, szp);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (zl != NULL)
			zfs_rename_unlock(&zl);
		zfs_dirent_unlock(sdl);
		zfs_dirent_unlock(tdl);

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		VN_RELE(ZTOV(szp));
		if (tzp)
			VN_RELE(ZTOV(tzp));
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (tzp)	/* Attempt to remove the existing target */
		error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);

	if (error == 0) {
		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
		if (error == 0) {
			szp->z_pflags |= ZFS_AV_MODIFIED;

			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);
			ASSERT3U(error, ==, 0);

			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
			if (error == 0) {
				zfs_log_rename(zilog, tx, TX_RENAME |
				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
				    sdl->dl_name, tdzp, tdl->dl_name, szp);

				/*
				 * Update path information for the target vnode
				 */
				vn_renamepath(tdvp, ZTOV(szp), tnm,
				    strlen(tnm));
			} else {
				/*
				 * At this point, we have successfully created
				 * the target name, but have failed to remove
				 * the source name.  Since the create was done
				 * with the ZRENAMING flag, there are
				 * complications; for one, the link count is
				 * wrong.  The easiest way to deal with this
				 * is to remove the newly created target, and
				 * return the original error.  This must
				 * succeed; fortunately, it is very unlikely to
				 * fail, since we just created it.
				 */
				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
				    ZRENAMING, NULL), ==, 0);
			}
		}
	}

	dmu_tx_commit(tx);
out:
	if (zl != NULL)
		zfs_rename_unlock(&zl);

	zfs_dirent_unlock(sdl);
	zfs_dirent_unlock(tdl);

	if (sdzp == tdzp)
		rw_exit(&sdzp->z_name_lock);

	VN_RELE(ZTOV(szp));
	if (tzp)
		VN_RELE(ZTOV(tzp));

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Insert the indicated symbolic reference entry into the directory.
 *
 *	IN:	dvp	- Directory to contain new symbolic link.
 *		name	- Name for new symlink entry.
 *		vap	- Attributes of new entry.
 *		link	- Target path of new symlink.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dvp - ctime|mtime updated
 */
/*ARGSUSED*/
static int
zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr,
    caller_context_t *ct, int flags)
{
	znode_t		*zp, *dzp = VTOZ(dvp);
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	uint64_t	len = strlen(link);
	int		error;
	int		zflg = ZNEW;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	uint64_t	txtype = TX_SYMLINK;

	ASSERT(vap->va_type == VLNK);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (EILSEQ);
	}
	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

	if (len > MAXPATHLEN) {
		ZFS_EXIT(zfsvfs);
		return (ENAMETOOLONG);
	}

	if ((error = zfs_acl_ids_create(dzp, 0,
	    vap, cr, NULL, &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
top:
	/*
	 * Attempt to lock directory; fail if entry already exists.
	 */
	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (EDQUOT);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE + len);
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Create a new object for the symlink.
	 * for version 4 ZPL datasets the symlink will be an SA attribute
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	mutex_enter(&zp->z_lock);
	if (zp->z_is_sa)
		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
		    link, len, tx);
	else
		zfs_sa_symlink(zp, link, len, tx);
	mutex_exit(&zp->z_lock);

	zp->z_size = len;
	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
	    &zp->z_size, sizeof (zp->z_size), tx);
	/*
	 * Insert the new object into the directory.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);

	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	VN_RELE(ZTOV(zp));

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
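
/*
 * Storage note: on SA-capable datasets (z_is_sa) the link target lives
 * in the SA_ZPL_SYMLINK attribute written via sa_update() above, while
 * older datasets spill it through zfs_sa_symlink(); zfs_readlink()
 * below selects the matching read path with the same z_is_sa test.
 */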
/*
 * Return, in the buffer contained in the provided uio structure,
 * the symbolic path referred to by vp.
 *
 *	IN:	vp	- vnode of symbolic link.
 *		uio	- structure to contain the link path.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	OUT:	uio	- structure containing the link path.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - atime updated
 */
/* ARGSUSED */
static int
zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	mutex_enter(&zp->z_lock);
	if (zp->z_is_sa)
		error = sa_lookup_uio(zp->z_sa_hdl,
		    SA_ZPL_SYMLINK(zfsvfs), uio);
	else
		error = zfs_sa_readlink(zp, uio);
	mutex_exit(&zp->z_lock);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Insert a new entry into directory tdvp referencing svp.
 *
 *	IN:	tdvp	- Directory to contain new entry.
 *		svp	- vnode of new entry.
 *		name	- name of new entry.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	tdvp - ctime|mtime updated
 *	 svp - ctime updated
 */
/* ARGSUSED */
static int
zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
    caller_context_t *ct, int flags)
{
	znode_t		*dzp = VTOZ(tdvp);
	znode_t		*tzp, *szp;
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	vnode_t		*realvp;
	int		error;
	int		zf = ZNEW;
	uint64_t	parent;
	uid_t		owner;

	ASSERT(tdvp->v_type == VDIR);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (VOP_REALVP(svp, &realvp, ct) == 0)
		svp = realvp;

	/*
	 * POSIX dictates that we return EPERM here.
	 * Better choices include ENOTSUP or EISDIR.
	 */
	if (svp->v_type == VDIR) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	if (svp->v_vfsp != tdvp->v_vfsp || zfsctl_is_node(svp)) {
		ZFS_EXIT(zfsvfs);
		return (EXDEV);
	}

	szp = VTOZ(svp);
	ZFS_VERIFY_ZP(szp);

	/* Prevent links to .zfs/shares files */

	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (uint64_t))) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	if (parent == zfsvfs->z_shares_dir) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	if (zfsvfs->z_utf8 && u8_validate(name,
	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (EILSEQ);
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	/*
	 * We do not support links between attributes and non-attributes
	 * because of the potential security risk of creating links
	 * into "normal" file space in order to circumvent restrictions
	 * imposed in attribute space.
	 */
	if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

top:
	/*
	 * Attempt to lock directory; fail if entry already exists.
	 */
	error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
	if (error) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	zfs_sa_upgrade_txholds(tx, szp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	error = zfs_link_create(dl, szp, tx, 0);

	if (error == 0) {
		uint64_t txtype = TX_LINK;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_link(zilog, tx, txtype, dzp, szp, name);
	}

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (error == 0) {
		vnevent_link(svp, ct);
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * zfs_null_putapage() is used when the file system has been force
 * unmounted.  It just drops the pages.
 */
/* ARGSUSED */
static int
zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
    size_t *lenp, int flags, cred_t *cr)
{
	pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
	return (0);
}
/*
 * Push a page out to disk, klustering if possible.
 *
 *	IN:	vp	- file to push page to.
 *		pp	- page to push.
 *		flags	- additional flags.
 *		cr	- credentials of caller.
 *
 *	OUT:	offp	- start of range pushed.
 *		lenp	- len of range pushed.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * NOTE: callers must have locked the page to be pushed.  On
 * exit, the page (and all other pages in the kluster) must be
 * unlocked.
 */
/* ARGSUSED */
static int
zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
    size_t *lenp, int flags, cred_t *cr)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	dmu_tx_t	*tx;
	u_offset_t	off, koff;
	size_t		len, klen;
	int		err;

	off = pp->p_offset;
	len = PAGESIZE;
	/*
	 * If our blocksize is bigger than the page size, try to kluster
	 * multiple pages so that we write a full block (thus avoiding
	 * a read-modify-write).
	 */
	if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
		klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
		koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
		ASSERT(koff <= zp->z_size);
		if (koff + klen > zp->z_size)
			klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
		pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
	}
	ASSERT3U(btop(len), ==, btopr(len));

	/*
	 * Can't push pages past end-of-file.
	 */
	if (off >= zp->z_size) {
		/* ignore all pages */
		err = 0;
		goto out;
	} else if (off + len > zp->z_size) {
		int npages = btopr(zp->z_size - off);
		page_t *trunc;

		page_list_break(&pp, &trunc, npages);
		/* ignore pages past end of file */
		if (trunc)
			pvn_write_done(trunc, flags);
		len = zp->z_size - off;
	}

	if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
	    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
		err = EDQUOT;
		goto out;
	}
top:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_write(tx, zp->z_id, off, len);

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err != 0) {
		if (err == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		goto out;
	}

	if (zp->z_blksz <= PAGESIZE) {
		caddr_t va = zfs_map_page(pp, S_READ);
		ASSERT3U(len, <=, PAGESIZE);
		dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
		zfs_unmap_page(pp, va);
	} else {
		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
	}

	if (err == 0) {
		uint64_t mtime[2], ctime[2];
		sa_bulk_attr_t bulk[3];
		int count = 0;

		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
		    &mtime, sizeof (mtime));
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    &ctime, sizeof (ctime));
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
		    &zp->z_pflags, sizeof (zp->z_pflags));
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);
		zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
	}
	dmu_tx_commit(tx);

out:
	pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
	if (offp)
		*offp = off;
	if (lenp)
		*lenp = len;

	return (err);
}
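
/*
 * Kluster arithmetic, worked example (assuming 4K pages and a 128K
 * file block): for a dirty page at offset 200K, klen is 128K and
 * koff = P2ALIGN(200K, 128K) = 128K, so pvn_write_kluster() gathers
 * the dirty pages in [128K, 256K) and the block is written whole,
 * avoiding a read-modify-write cycle.
 */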
/*
 * Copy the portion of the file indicated from pages into the file.
 * The pages are stored in a page list attached to the file's vnode.
 *
 *	IN:	vp	- vnode of file to push page data to.
 *		off	- position in file to put data.
 *		len	- amount of data to write.
 *		flags	- flags to control the operation.
 *		cr	- credentials of caller.
 *		ct	- caller context.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - ctime|mtime updated
 */
/*ARGSUSED*/
static int
zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
    caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	page_t		*pp;
	size_t		io_len;
	u_offset_t	io_off;
	uint_t		blksz;
	rl_t		*rl;
	int		error = 0;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/*
	 * There's nothing to do if no data is cached.
	 */
	if (!vn_has_cached_data(vp)) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/*
	 * Align this request to the file block size in case we kluster.
	 * XXX - this can result in pretty aggressive locking, which can
	 * impact simultaneous read/write access.  One option might be
	 * to break up long requests (len == 0) into block-by-block
	 * operations to get narrower locking.
	 */
	blksz = zp->z_blksz;
	if (ISP2(blksz))
		io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
	else
		io_off = 0;
	if (len > 0 && ISP2(blksz))
		io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
	else
		io_len = 0;

	if (io_len == 0) {
		/*
		 * Search the entire vp list for pages >= io_off.
		 */
		rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
		error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
		goto out;
	}
	rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);

	if (off > zp->z_size) {
		/* past end of file */
		zfs_range_unlock(rl);
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);

	for (off = io_off; io_off < off + len; io_off += io_len) {
		if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
			pp = page_lookup(vp, io_off,
			    (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
		} else {
			pp = page_lookup_nowait(vp, io_off,
			    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
		}

		if (pp != NULL && pvn_getdirty(pp, flags)) {
			int err;

			/*
			 * Found a dirty page to push
			 */
			err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
			if (err)
				error = err;
		} else {
			io_len = PAGESIZE;
		}
	}
out:
	zfs_range_unlock(rl);
	if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zfsvfs->z_log, zp->z_id);
	ZFS_EXIT(zfsvfs);
	return (error);
}
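
/*
 * Alignment example (illustrative): with a 128K file block size, a
 * putpage request at off = 300K, len = 8K is widened to io_off = 256K
 * and io_len = P2ROUNDUP(8K + 44K, 128K) = 128K, so the loop above
 * pushes the entire containing block in one pass.
 */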
/*ARGSUSED*/
void
zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;

	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
	if (zp->z_sa_hdl == NULL) {
		/*
		 * The fs has been unmounted, or we did a
		 * suspend/resume and this file no longer exists.
		 */
		if (vn_has_cached_data(vp)) {
			(void) pvn_vplist_dirty(vp, 0, zfs_null_putapage,
			    B_INVAL, cr);
		}

		mutex_enter(&zp->z_lock);
		mutex_enter(&vp->v_lock);
		ASSERT(vp->v_count == 1);
		vp->v_count = 0;
		mutex_exit(&vp->v_lock);
		mutex_exit(&zp->z_lock);
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		zfs_znode_free(zp);
		return;
	}

	/*
	 * Attempt to push any data in the page cache.  If this fails
	 * we will get kicked out later in zfs_zinactive().
	 */
	if (vn_has_cached_data(vp)) {
		(void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC,
		    cr);
	}

	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
		} else {
			mutex_enter(&zp->z_lock);
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
			    (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
			zp->z_atime_dirty = 0;
			mutex_exit(&zp->z_lock);
			dmu_tx_commit(tx);
		}
	}

	zfs_zinactive(zp);
	rw_exit(&zfsvfs->z_teardown_inactive_lock);
}
/*
 * Bounds-check the seek operation.
 *
 *	IN:	vp	- vnode seeking within
 *		ooff	- old file offset
 *		noffp	- pointer to new file offset
 *		ct	- caller context
 *
 *	RETURN:	0 if success
 *		EINVAL if new offset invalid
 */
/* ARGSUSED */
static int
zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
    caller_context_t *ct)
{
	if (vp->v_type == VDIR)
		return (0);
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
/*
 * Pre-filter the generic locking function to trap attempts to place
 * a mandatory lock on a memory mapped file.
 */
static int
zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
    flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/*
	 * We are following the UFS semantics with respect to mapcnt
	 * here: If we see that the file is mapped already, then we will
	 * return an error, but we don't worry about races between this
	 * function and zfs_map().
	 */
	if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
		ZFS_EXIT(zfsvfs);
		return (EAGAIN);
	}
	ZFS_EXIT(zfsvfs);
	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
}
/*
 * If we can't find a page in the cache, we will create a new page
 * and fill it with file data.  For efficiency, we may try to fill
 * multiple pages at once (klustering) to fill up the supplied page
 * list.  Note that the pages to be filled are held with an exclusive
 * lock to prevent access by other threads while they are being filled.
 */
static int
zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
{
	znode_t *zp = VTOZ(vp);
	page_t *pp, *cur_pp;
	objset_t *os = zp->z_zfsvfs->z_os;
	u_offset_t io_off, total;
	size_t io_len;
	int err;

	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
		/*
		 * We only have a single page, don't bother klustering
		 */
		io_off = off;
		io_len = PAGESIZE;
		pp = page_create_va(vp, io_off, io_len,
		    PG_EXCL | PG_WAIT, seg, addr);
	} else {
		/*
		 * Try to find enough pages to fill the page list
		 */
		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
		    &io_len, off, plsz, 0);
	}
	if (pp == NULL) {
		/*
		 * The page already exists, nothing to do here.
		 */
		*pl = NULL;
		return (0);
	}

	/*
	 * Fill the pages in the kluster.
	 */
	cur_pp = pp;
	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
		caddr_t va;

		ASSERT3U(io_off, ==, cur_pp->p_offset);
		va = zfs_map_page(cur_pp, S_WRITE);
		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
		    DMU_READ_PREFETCH);
		zfs_unmap_page(cur_pp, va);
		if (err) {
			/* On error, toss the entire kluster */
			pvn_read_done(pp, B_ERROR);
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = EIO;
			return (err);
		}
		cur_pp = cur_pp->p_next;
	}

	/*
	 * Fill in the page list array from the kluster starting
	 * from the desired offset `off'.
	 * NOTE: the page list will always be null terminated.
	 */
	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
	ASSERT(pl == NULL || (*pl)->p_offset == off);

	return (0);
}
/*
 * Return pointers to the pages for the file region [off, off + len]
 * in the pl array.  If plsz is greater than len, this function may
 * also return page pointers from after the specified region
 * (i.e. the region [off, off + plsz]).  These additional pages are
 * only returned if they are already in the cache, or were created as
 * part of a klustered read.
 *
 *	IN:	vp	- vnode of file to get data from.
 *		off	- position in file to get data from.
 *		len	- amount of data to retrieve.
 *		plsz	- length of provided page list.
 *		seg	- segment to obtain pages for.
 *		addr	- virtual address of fault.
 *		rw	- mode of created pages.
 *		cr	- credentials of caller.
 *		ct	- caller context.
 *
 *	OUT:	protp	- protection mode of created pages.
 *		pl	- list of pages created.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - atime updated
 */
/* ARGSUSED */
static int
zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	page_t		**pl0 = pl;
	int		err = 0;

	/* we do our own caching, faultahead is unnecessary */
	if (pl == NULL)
		return (0);
	else if (len > plsz)
		len = plsz;
	else
		len = P2ROUNDUP(len, PAGESIZE);
	ASSERT(plsz >= len);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (protp)
		*protp = PROT_ALL;

	/*
	 * Loop through the requested range [off, off + len) looking
	 * for pages.  If we don't find a page, we will need to create
	 * a new page and fill it with data from the file.
	 */
	while (len > 0) {
		if (*pl = page_lookup(vp, off, SE_SHARED))
			*(pl+1) = NULL;
		else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
			goto out;
		while (*pl) {
			ASSERT3U((*pl)->p_offset, ==, off);
			off += PAGESIZE;
			addr += PAGESIZE;
			if (len > 0) {
				ASSERT3U(len, >=, PAGESIZE);
				len -= PAGESIZE;
			}
			ASSERT3U(plsz, >=, PAGESIZE);
			plsz -= PAGESIZE;
			pl++;
		}
	}

	/*
	 * Fill out the page array with any pages already in the cache.
	 */
	while (plsz > 0 &&
	    (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
		off += PAGESIZE;
		plsz -= PAGESIZE;
	}
out:
	if (err) {
		/*
		 * Release any pages we have previously locked.
		 */
		while (pl > pl0)
			page_unlock(*--pl);
	} else {
		ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	}

	*pl = NULL;

	ZFS_EXIT(zfsvfs);
	return (err);
}
/*
 * Request a memory map for a section of a file.  This code interacts
 * with common code and the VM system as follows:
 *
 *	common code calls mmap(), which ends up in smmap_common()
 *
 *	this calls VOP_MAP(), which takes you into (say) zfs
 *
 *	zfs_map() calls as_map(), passing segvn_create() as the callback
 *
 *	segvn_create() creates the new segment and calls VOP_ADDMAP()
 *
 *	zfs_addmap() updates z_mapcnt
 */
/*ARGSUSED*/
static int
zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	segvn_crargs_t	vn_a;
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((prot & PROT_WRITE) && (zp->z_pflags &
	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	if ((prot & (PROT_READ | PROT_EXEC)) &&
	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
		ZFS_EXIT(zfsvfs);
		return (EACCES);
	}

	if (vp->v_flag & VNOMAP) {
		ZFS_EXIT(zfsvfs);
		return (ENOSYS);
	}

	if (off < 0 || len > MAXOFFSET_T - off) {
		ZFS_EXIT(zfsvfs);
		return (ENXIO);
	}

	if (vp->v_type != VREG) {
		ZFS_EXIT(zfsvfs);
		return (ENODEV);
	}

	/*
	 * If file is locked, disallow mapping.
	 */
	if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
		ZFS_EXIT(zfsvfs);
		return (EAGAIN);
	}

	as_rangelock(as);
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	vn_a.vp = vp;
	vn_a.offset = (u_offset_t)off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);

	as_rangeunlock(as);
	ZFS_EXIT(zfsvfs);
	return (error);
}
/* ARGSUSED */
static int
zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	uint64_t pages = btopr(len);

	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
	return (0);
}
/*
 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
 * more accurate mtime for the associated file.  Since we don't have a way of
 * detecting when the data was actually modified, we have to resort to
 * heuristics.  If an explicit msync() is done, then we mark the mtime when the
 * last page is pushed.  The problem occurs when the msync() call is omitted,
 * which is by far the most common case:
 *
 *	open()
 *	mmap()
 *	<modify memory>
 *	munmap()
 *	close()
 *	<time lapse>
 *	putpage() via fsflush
 *
 * If we wait until fsflush to come along, we can have a modification time that
 * is some arbitrary point in the future.  In order to prevent this in the
 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
 * torn down.
 */
/* ARGSUSED */
static int
zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	uint64_t pages = btopr(len);

	ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
	atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);

	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
	    vn_has_cached_data(vp))
		(void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);

	return (0);
}
/*
 * Free or allocate space in a file.  Currently, this function only
 * supports the `F_FREESP' command.  However, this command is somewhat
 * misnamed, as its functionality includes the ability to allocate as
 * well as free space.
 *
 *	IN:	vp	- vnode of file to free data in.
 *		cmd	- action to take (only F_FREESP supported).
 *		bfp	- section of file to free/alloc.
 *		flag	- current file open mode flags.
 *		offset	- current file offset.
 *		cr	- credentials of caller [UNUSED].
 *		ct	- caller context.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - ctime|mtime updated
 */
/* ARGSUSED */
static int
zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
    offset_t offset, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	uint64_t	off, len;
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (cmd != F_FREESP) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	if (error = convoff(vp, bfp, 0, offset)) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (bfp->l_len < 0) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	off = bfp->l_start;
	len = bfp->l_len; /* 0 means from off to end of file */

	error = zfs_freesp(zp, off, len, flag, TRUE);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*ARGSUSED*/
static int
zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	uint32_t	gen;
	uint64_t	gen64;
	uint64_t	object = zp->z_id;
	zfid_short_t	*zfid;
	int		size, i, error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
	    &gen64, sizeof (uint64_t))) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	gen = (uint32_t)gen64;

	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
	if (fidp->fid_len < size) {
		fidp->fid_len = size;
		ZFS_EXIT(zfsvfs);
		return (ENOSPC);
	}

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = size;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* Must have a non-zero generation number to distinguish from .zfs */
	if (gen == 0)
		gen = 1;
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));

	if (size == LONG_FID_LEN) {
		uint64_t	objsetid = dmu_objset_id(zfsvfs->z_os);
		zfid_long_t	*zlfid;

		zlfid = (zfid_long_t *)fidp;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));

		/* XXX - this should be the generation number for the objset */
		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			zlfid->zf_setgen[i] = 0;
	}

	ZFS_EXIT(zfsvfs);
	return (0);
}
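
/*
 * Packing example (illustrative): object number 0x1234 is stored
 * least-significant byte first, so zf_object[0] = 0x34, zf_object[1] =
 * 0x12 and the remaining bytes are zero; zf_gen and zf_setid use the
 * same little-endian packing, which keeps the fid layout independent
 * of host byte order.
 */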
static int
zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
	znode_t		*zp, *xzp;
	zfsvfs_t	*zfsvfs;
	zfs_dirlock_t	*dl;
	int		error;

	switch (cmd) {
	case _PC_LINK_MAX:
		*valp = ULONG_MAX;
		return (0);

	case _PC_FILESIZEBITS:
		*valp = 64;
		return (0);

	case _PC_XATTR_EXISTS:
		zp = VTOZ(vp);
		zfsvfs = zp->z_zfsvfs;
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		*valp = 0;
		error = zfs_dirent_lock(&dl, zp, "", &xzp,
		    ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
		if (error == 0) {
			zfs_dirent_unlock(dl);
			if (!zfs_dirempty(xzp))
				*valp = 1;
			VN_RELE(ZTOV(xzp));
		} else if (error == ENOENT) {
			/*
			 * If there aren't extended attributes, it's the
			 * same as having zero of them.
			 */
			error = 0;
		}
		ZFS_EXIT(zfsvfs);
		return (error);

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
		    (vp->v_type == VREG || vp->v_type == VDIR);
		return (0);

	case _PC_ACCESS_FILTERING:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
		    vp->v_type == VDIR;
		return (0);

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACE_ENABLED;
		return (0);

	case _PC_MIN_HOLE_SIZE:
		*valp = (ulong_t)SPA_MINBLOCKSIZE;
		return (0);

	case _PC_TIMESTAMP_RESOLUTION:
		/* nanosecond timestamp resolution */
		*valp = 1L;
		return (0);

	default:
		return (fs_pathconf(vp, cmd, valp, cr, ct));
	}
}
/*ARGSUSED*/
static int
zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);

	return (error);
}
/*ARGSUSED*/
static int
zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t	*zilog = zfsvfs->z_log;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Tunable, both must be a power of 2.
 *
 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
 *		an arcbuf for a partial block read
 */
int zcr_blksz_min = (1 << 10);	/* 1K */
int zcr_blksz_max = (1 << 17);	/* 128K */
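
/*
 * Example of the resulting policy (illustrative): with the defaults
 * above, a 512-byte read is never zero-copied (blksz is clamped up to
 * zcr_blksz_min and the request is smaller than that), while a 64K
 * read of a file with a 64K block size can be served by loaning the
 * arc_buf directly, skipping one copy into the caller's buffer.
 */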
/*ARGSUSED*/
static int
zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
    caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int max_blksz = zfsvfs->z_max_blksz;
	uio_t *uio = &xuio->xu_uio;
	ssize_t size = uio->uio_resid;
	offset_t offset = uio->uio_loffset;
	int blksz;
	int fullblk, i;
	arc_buf_t *abuf;
	ssize_t maxsize;
	int preamble, postamble;

	if (xuio->xu_type != UIOTYPE_ZEROCOPY)
		return (EINVAL);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	switch (ioflag) {
	case UIO_WRITE:
		/*
		 * Loan out an arc_buf for write if write size is bigger than
		 * max_blksz, and the file's block size is also max_blksz.
		 */
		blksz = max_blksz;
		if (size < blksz || zp->z_blksz != blksz) {
			ZFS_EXIT(zfsvfs);
			return (EINVAL);
		}
		/*
		 * Caller requests buffers for write before knowing where the
		 * write offset might be (e.g. NFS TCP write).
		 */
		if (offset == -1) {
			preamble = 0;
		} else {
			preamble = P2PHASE(offset, blksz);
			if (preamble) {
				preamble = blksz - preamble;
				size -= preamble;
			}
		}

		postamble = P2PHASE(size, blksz);
		size -= postamble;

		fullblk = size / blksz;
		(void) dmu_xuio_init(xuio,
		    (preamble != 0) + fullblk + (postamble != 0));
		DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
		    int, postamble, int,
		    (preamble != 0) + fullblk + (postamble != 0));

		/*
		 * Have to fix iov base/len for partial buffers.  They
		 * currently represent full arc_buf's.
		 */
		if (preamble) {
			/* data begins in the middle of the arc_buf */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf,
			    blksz - preamble, preamble);
		}

		for (i = 0; i < fullblk; i++) {
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf, 0, blksz);
		}

		if (postamble) {
			/* data ends in the middle of the arc_buf */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf, 0, postamble);
		}
		break;
	case UIO_READ:
		/*
		 * Loan out an arc_buf for read if the read size is larger than
		 * the current file block size.  Block alignment is not
		 * considered.  Partial arc_buf will be loaned out for read.
		 */
		blksz = zp->z_blksz;
		if (blksz < zcr_blksz_min)
			blksz = zcr_blksz_min;
		if (blksz > zcr_blksz_max)
			blksz = zcr_blksz_max;
		/* avoid potential complexity of dealing with it */
		if (blksz > max_blksz) {
			ZFS_EXIT(zfsvfs);
			return (EINVAL);
		}

		maxsize = zp->z_size - uio->uio_loffset;
		if (size > maxsize)
			size = maxsize;

		if (size < blksz || vn_has_cached_data(vp)) {
			ZFS_EXIT(zfsvfs);
			return (EINVAL);
		}
		break;
	default:
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	uio->uio_extflg = UIO_XUIO;
	XUIO_XUZC_RW(xuio) = ioflag;
	ZFS_EXIT(zfsvfs);
	return (0);
}
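
/*
 * Worked example for the write path above (illustrative): with
 * blksz = 128K, offset = 100K and size = 300K, preamble =
 * 128K - P2PHASE(100K, 128K) = 28K, leaving 272K; postamble =
 * P2PHASE(272K, 128K) = 16K; fullblk = 256K / 128K = 2.
 * dmu_xuio_init() is therefore asked for 1 + 2 + 1 = 4 loaned
 * arc_bufs.
 */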
/*ARGSUSED*/
static int
zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
{
	int i;
	arc_buf_t *abuf;
	int ioflag = XUIO_XUZC_RW(xuio);

	ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);

	i = dmu_xuio_cnt(xuio);
	while (i-- > 0) {
		abuf = dmu_xuio_arcbuf(xuio, i);
		/*
		 * if abuf == NULL, it must be a write buffer
		 * that has been returned in zfs_write().
		 */
		if (abuf)
			dmu_return_arcbuf(abuf);
		ASSERT(abuf || ioflag == UIO_WRITE);
	}

	dmu_xuio_fini(xuio);
	return (0);
}
/*
 * Predeclare these here so that the compiler assumes that
 * this is an "old style" function declaration that does
 * not include arguments => we won't get type mismatch errors
 * in the initializations that follow.
 */
static int zfs_inval();
static int zfs_isdir();

static int
zfs_inval()
{
	return (EINVAL);
}

static int
zfs_isdir()
{
	return (EISDIR);
}
/*
 * Directory vnode operations template
 */
vnodeops_t *zfs_dvnodeops;
const fs_operation_def_t zfs_dvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_READ,		{ .error = zfs_isdir },
	VOPNAME_WRITE,		{ .error = zfs_isdir },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = zfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
	VOPNAME_LINK,		{ .vop_link = zfs_link },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = zfs_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = zfs_symlink },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Regular file vnode operations template
 */
vnodeops_t *zfs_fvnodeops;
const fs_operation_def_t zfs_fvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_READ,		{ .vop_read = zfs_read },
	VOPNAME_WRITE,		{ .vop_write = zfs_write },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = zfs_frlock },
	VOPNAME_SPACE,		{ .vop_space = zfs_space },
	VOPNAME_GETPAGE,	{ .vop_getpage = zfs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = zfs_putpage },
	VOPNAME_MAP,		{ .vop_map = zfs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = zfs_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = zfs_delmap },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	VOPNAME_REQZCBUF,	{ .vop_reqzcbuf = zfs_reqzcbuf },
	VOPNAME_RETZCBUF,	{ .vop_retzcbuf = zfs_retzcbuf },
	NULL,			NULL
};

/*
 * Symbolic link vnode operations template
 */
vnodeops_t *zfs_symvnodeops;
const fs_operation_def_t zfs_symvnodeops_template[] = {
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * special share hidden files vnode operations template
 */
vnodeops_t *zfs_sharevnodeops;
const fs_operation_def_t zfs_sharevnodeops_template[] = {
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Extended attribute directory vnode operations template
 *	This template is identical to the directory vnodes
 *	operation template except for restricted operations:
 *		VOP_MKDIR()
 *		VOP_SYMLINK()
 * Note that there are other restrictions embedded in:
 *	zfs_create()	- restrict type to VREG
 *	zfs_link()	- no links into/out of attribute space
 *	zfs_rename()	- no moves into/out of attribute space
 */
vnodeops_t *zfs_xdvnodeops;
const fs_operation_def_t zfs_xdvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = zfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
	VOPNAME_LINK,		{ .vop_link = zfs_link },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_MKDIR,		{ .error = zfs_inval },
	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
	VOPNAME_SYMLINK,	{ .error = zfs_inval },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Error vnode operations template
 */
vnodeops_t *zfs_evnodeops;
const fs_operation_def_t zfs_evnodeops_template[] = {
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	NULL,			NULL
};
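
/*
 * Registration note (an assumption based on the standard Solaris VFS
 * pattern, not shown in this file): each template above is handed to
 * vn_make_ops() at file system initialization, which fills in the
 * corresponding vnodeops_t pointer, e.g.:
 *
 *	error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
 *	    &zfs_dvnodeops);
 */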