 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2015 Joyent, Inc.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vnode.h>
#include <sys/taskq.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <vm/seg_vn.h>
#include <vm/seg_kpm.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include "sys/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
/*
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1) A check must be made in each zfs thread for a mounted file system.
 *      This is done avoiding races using ZFS_ENTER(zfsvfs).
 *      A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *      must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *      can return EIO from the calling function.
 *
 *  (2) VN_RELE() should always be the last thing except for zil_commit()
 *      (if necessary) and ZFS_EXIT().  This is for 3 reasons:
 *      First, if it's the last reference, the vnode/znode
 *      can be freed, so the zp may point to freed memory.  Second, the last
 *      reference will call zfs_zinactive(), which may induce a lot of work --
 *      pushing cached pages (which acquires range locks) and syncing out
 *      cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *      which could deadlock the system if you were already holding one.
 *      If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
 *
 *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *      as they can span dmu_tx_assign() calls.
 *
 *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *      dmu_tx_assign().  This is critical because we don't want to block
 *      while holding locks.
 *
 *      If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
 *      reduces lock contention and CPU usage when we must wait (note that if
 *      throughput is constrained by the storage, nearly every transaction
 *      must wait).
 *
 *      Note, in particular, that if a lock is sometimes acquired before
 *      the tx assigns, and sometimes after (e.g. z_lock), then failing
 *      to use a non-blocking assign can deadlock the system.  The scenario:
 *
 *      Thread A has grabbed a lock before calling dmu_tx_assign().
 *      Thread B is in an already-assigned tx, and blocks for this lock.
 *      Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *      forever, because the previous txg can't quiesce until B's tx commits.
 *
 *      If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *      then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
 *      calls to dmu_tx_assign(), pass TXG_WAITED rather than TXG_NOWAIT,
 *      to indicate that this operation has already called dmu_tx_wait().
 *      This will ensure that we don't retry forever, waiting a short bit
 *      each time.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *      before dropping locks.  This ensures that the ordering of events
 *      in the intent log matches the order in which they actually occurred.
 *      During ZIL replay the zfs_log_* functions will update the sequence
 *      number to indicate the zil transaction has replayed.
 *
 *  (6) At the end of each vnode op, the DMU tx must always commit,
 *      regardless of whether there were any errors.
 *
 *  (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *      to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *      ZFS_ENTER(zfsvfs);              // exit if unmounted
 * top:
 *      zfs_dirent_lock(&dl, ...)       // lock directory entry (may VN_HOLD())
 *      rw_enter(...);                  // grab any other locks you need
 *      tx = dmu_tx_create(...);        // get DMU tx
 *      dmu_tx_hold_*();                // hold each object you might modify
 *      error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
 *      if (error) {
 *              rw_exit(...);           // drop locks
 *              zfs_dirent_unlock(dl);  // unlock directory entry
 *              VN_RELE(...);           // release held vnodes
 *              if (error == ERESTART) {
 *                      waited = B_TRUE;
 *                      dmu_tx_wait(tx);
 *                      dmu_tx_abort(tx);
 *                      goto top;
 *              }
 *              dmu_tx_abort(tx);       // abort DMU tx
 *              ZFS_EXIT(zfsvfs);       // finished in zfs
 *              return (error);         // really out of space
 *      }
 *      error = do_real_work();         // do whatever this VOP does
 *      if (error == 0)
 *              zfs_log_*(...);         // on success, make ZIL entry
 *      dmu_tx_commit(tx);              // commit DMU tx -- error or not
 *      rw_exit(...);                   // drop locks
 *      zfs_dirent_unlock(dl);          // unlock directory entry
 *      VN_RELE(...);                   // release held vnodes
 *      zil_commit(zilog, foid);        // synchronous when necessary
 *      ZFS_EXIT(zfsvfs);               // finished in zfs
 *      return (error);                 // done, report error
 */
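/*
 * Editorial illustration (not part of the original source): a minimal,
 * hypothetical sketch of the dmu_tx_assign() retry pattern described in
 * Big Rule (4) above.  The function zfs_example_op() and its argument list
 * are invented for this example; the DMU/ZPL calls are the ones used
 * throughout this file.  The block is wrapped in #if 0 to make clear that
 * it is illustrative only.
 */
#if 0
static int
zfs_example_op(zfsvfs_t *zfsvfs, znode_t *zp)
{
    dmu_tx_t *tx;
    boolean_t waited = B_FALSE;
    int error;

    ZFS_ENTER(zfsvfs);          /* returns EIO if the fs is unmounted */
    ZFS_VERIFY_ZP(zp);
top:
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);

    /* Non-blocking assign while ZPL locks are held (Big Rule 4). */
    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
    if (error) {
        if (error == ERESTART) {
            /* Drop locks, wait for the next open txg, then retry. */
            waited = B_TRUE;
            dmu_tx_wait(tx);
            dmu_tx_abort(tx);
            goto top;
        }
        dmu_tx_abort(tx);       /* e.g. really out of space */
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    /* ... do the real work, then zfs_log_*() on success ... */

    dmu_tx_commit(tx);          /* always commit, error or not */
    ZFS_EXIT(zfsvfs);
    return (0);
}
#endif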
zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
    znode_t *zp = VTOZ(*vpp);
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;

    if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
        ((flag & FAPPEND) == 0)) {
        return (SET_ERROR(EPERM));
    }

    if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
        ZTOV(zp)->v_type == VREG &&
        !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
        if (fs_vscan(*vpp, cr, 0) != 0) {
            return (SET_ERROR(EACCES));
        }
    }

    /* Keep a count of the synchronous opens in the znode */
    if (flag & (FSYNC | FDSYNC))
        atomic_inc_32(&zp->z_sync_cnt);
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
    znode_t *zp = VTOZ(vp);
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;

    /* Clean up any locks held by this process on the vp. */
    cleanlocks(vp, ddi_get_pid(), 0);
    cleanshares(vp, ddi_get_pid());

    /* Decrement the synchronous opens in the znode */
    if ((flag & (FSYNC | FDSYNC)) && (count == 1))
        atomic_dec_32(&zp->z_sync_cnt);

    if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
        ZTOV(zp)->v_type == VREG &&
        !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
        VERIFY(fs_vscan(vp, cr, 1) == 0);
/*
 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
 * data (cmd == _FIO_SEEK_DATA).  "off" is an in/out parameter.
 */
zfs_holey(vnode_t *vp, int cmd, offset_t *off)
{
    znode_t *zp = VTOZ(vp);
    uint64_t noff = (uint64_t)*off; /* new offset */

    file_sz = zp->z_size;
    if (noff >= file_sz) {
        return (SET_ERROR(ENXIO));
    }

    if (cmd == _FIO_SEEK_HOLE)

    error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);

        return (SET_ERROR(ENXIO));

    /*
     * We could find a hole that begins after the logical end-of-file,
     * because dmu_offset_next() only works on whole blocks.  If the
     * EOF falls mid-block, then indicate that the "virtual hole"
     * at the end of the file begins at the logical EOF, rather than
     * at the end of the last block.
     */
    if (noff > file_sz) {
zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
    int *rvalp, caller_context_t *ct)
{
    dmu_object_info_t doi;

        return (zfs_sync(vp->v_vfsp, 0, cred));

    /*
     * The following two ioctls are used by bfu.  Faking out,
     * necessary to avoid bfu errors.
     */

        if (ddi_copyin((void *)data, &off, sizeof (off), flag))
            return (SET_ERROR(EFAULT));

        zfsvfs = zp->z_zfsvfs;

        /* offset parameter is in/out */
        error = zfs_holey(vp, com, &off);

        if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
            return (SET_ERROR(EFAULT));

    case _FIO_COUNT_FILLED:
        /*
         * _FIO_COUNT_FILLED adds a new ioctl command which
         * exposes the number of filled blocks in a ZFS object.
         */
        zfsvfs = zp->z_zfsvfs;

        /*
         * Wait for all dirty blocks for this object
         * to get synced out to disk, and the DMU info updated.
         */
        error = dmu_object_wait_synced(zfsvfs->z_os, zp->z_id);

        /* Retrieve fill count from DMU object. */
        error = dmu_object_info(zfsvfs->z_os, zp->z_id, &doi);

        ndata = doi.doi_fill_count;

        if (ddi_copyout(&ndata, (void *)data, sizeof (ndata), flag))
            return (SET_ERROR(EFAULT));

    return (SET_ERROR(ENOTTY));
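/*
 * Editorial illustration (not part of the original source): a hypothetical
 * userland sketch showing how the _FIO_SEEK_HOLE/_FIO_SEEK_DATA support
 * above is typically consumed, assuming a platform where lseek(2) exposes
 * SEEK_HOLE/SEEK_DATA (on illumos these map to the ioctls handled by
 * zfs_ioctl()).  The helper name print_data_regions() is invented for the
 * example.  Wrapped in #if 0 so it is clearly illustrative only.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Print the data (non-hole) regions of an already-open sparse file. */
static void
print_data_regions(int fd)
{
    off_t data = 0, hole;

    for (;;) {
        data = lseek(fd, data, SEEK_DATA);
        if (data < 0)
            break;          /* ENXIO: no more data before EOF */
        hole = lseek(fd, data, SEEK_HOLE);
        if (hole < 0)
            break;
        (void) printf("data: [%lld, %lld)\n",
            (long long)data, (long long)hole);
        data = hole;
    }
}
#endif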
/*
 * Utility functions to map and unmap a single physical page.  These
 * are used to manage the mappable copies of ZFS file data, and therefore
 * do not update ref/mod bits.
 */
zfs_map_page(page_t *pp, enum seg_rw rw)
{
        return (hat_kpm_mapin(pp, 0));
    ASSERT(rw == S_READ || rw == S_WRITE);
    return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0),

zfs_unmap_page(page_t *pp, caddr_t addr)
{
        hat_kpm_mapout(pp, 0, addr);
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:    If we find a memory mapped page, we write to *both*
 *              the page and the dmu buffer.
 */
update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
{
    off = start & PAGEOFFSET;
    for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
        uint64_t nbytes = MIN(PAGESIZE - off, len);

        if (pp = page_lookup(&vp->v_object, start, SE_SHARED)) {
            va = zfs_map_page(pp, S_WRITE);
            (void) dmu_read(os, oid, start+off, nbytes, va+off,
            zfs_unmap_page(pp, va);
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:     We "read" preferentially from memory mapped pages,
 *              else we default from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *       the file is memory mapped.
 */
mappedread(vnode_t *vp, int nbytes, uio_t *uio)
{
    znode_t *zp = VTOZ(vp);

    start = uio->uio_loffset;
    off = start & PAGEOFFSET;
    for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
        uint64_t bytes = MIN(PAGESIZE - off, len);

        if (pp = page_lookup(&vp->v_object, start, SE_SHARED)) {
            va = zfs_map_page(pp, S_READ);
            error = uiomove(va + off, bytes, UIO_READ, uio);
            zfs_unmap_page(pp, va);
            error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),

offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
/*
 * Read bytes from specified file into supplied buffer.
 *
 *      IN:     vp      - vnode of file to be read from.
 *              uio     - structure supplying read location, range info,
 *                        and return buffer.
 *              ioflag  - SYNC flags; used to provide FRSYNC semantics.
 *              cr      - credentials of caller.
 *              ct      - caller context
 *
 *      OUT:    uio     - updated offset and range, buffer filled.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *      vp - atime updated if byte count > 0
 */
zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
{
    znode_t *zp = VTOZ(vp);
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;

    if (zp->z_pflags & ZFS_AV_QUARANTINED) {
        return (SET_ERROR(EACCES));
    }

    /* Validate file offset */
    if (uio->uio_loffset < 0) {
        return (SET_ERROR(EINVAL));
    }

    /* Fasttrack empty reads */
    if (uio->uio_resid == 0) {

    /* Check for mandatory locks */
    if (MANDMODE(zp->z_mode)) {
        if (error = chklock(vp, FREAD,
            uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {

    /* If we're in FRSYNC mode, sync out this znode before reading it. */
    if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zfsvfs->z_log, zp->z_id);

    /* Lock the range against changes. */
    rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);

    /*
     * If we are reading past end-of-file we can skip
     * to the end; but we might still need to set atime.
     */
    if (uio->uio_loffset >= zp->z_size) {

    ASSERT(uio->uio_loffset < zp->z_size);
    n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

    if ((uio->uio_extflg == UIO_XUIO) &&
        (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
        int blksz = zp->z_blksz;
        uint64_t offset = uio->uio_loffset;

        xuio = (xuio_t *)uio;

        nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,

            ASSERT(offset + n <= blksz);

        (void) dmu_xuio_init(xuio, nblk);

        if (vn_has_cached_data(vp)) {
            /*
             * For simplicity, we always allocate a full buffer
             * even if we only expect to read a portion of a block.
             */
            while (--nblk >= 0) {
                (void) dmu_xuio_add(xuio,
                    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),

        nbytes = MIN(n, zfs_read_chunk_size -
            P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

        if (vn_has_cached_data(vp)) {
            error = mappedread(vp, nbytes, uio);
            error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),

        /* convert checksum errors into IO errors */
            error = SET_ERROR(EIO);

    zfs_range_unlock(rl);

    ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
/*
 * Write the bytes to a file.
 *
 *      IN:     vp      - vnode of file to be written to.
 *              uio     - structure supplying write location, range info,
 *                        and data buffer.
 *              ioflag  - FAPPEND, FSYNC, and/or FDSYNC.  FAPPEND is
 *                        set if in append mode.
 *              cr      - credentials of caller.
 *              ct      - caller context (NFS/CIFS fem monitor only)
 *
 *      OUT:    uio     - updated offset and range.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *      vp - ctime|mtime updated if byte count > 0
 */
652 zfs_write(vnode_t
*vp
, uio_t
*uio
, int ioflag
, cred_t
*cr
, caller_context_t
*ct
)
654 znode_t
*zp
= VTOZ(vp
);
655 rlim64_t limit
= uio
->uio_llimit
;
656 ssize_t start_resid
= uio
->uio_resid
;
660 zfsvfs_t
*zfsvfs
= zp
->z_zfsvfs
;
665 int max_blksz
= zfsvfs
->z_max_blksz
;
668 iovec_t
*aiov
= NULL
;
671 int iovcnt
= uio
->uio_iovcnt
;
672 iovec_t
*iovp
= uio
->uio_iov
;
675 sa_bulk_attr_t bulk
[4];
676 uint64_t mtime
[2], ctime
[2];
679 * Fasttrack empty write
685 if (limit
== RLIM64_INFINITY
|| limit
> MAXOFFSET_T
)
691 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
, &mtime
, 16);
692 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
, &ctime
, 16);
693 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_SIZE(zfsvfs
), NULL
,
695 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_FLAGS(zfsvfs
), NULL
,
 * In the case where vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots), our
 * callers might not be able to detect properly that we are read-only,
 * so check it explicitly here.
703 if (zfsvfs
->z_vfs
->vfs_flag
& VFS_RDONLY
) {
705 return (SET_ERROR(EROFS
));
709 * If immutable or not appending then return EPERM.
710 * Intentionally allow ZFS_READONLY through here.
711 * See zfs_zaccess_common()
713 if ((zp
->z_pflags
& ZFS_IMMUTABLE
) ||
714 ((zp
->z_pflags
& ZFS_APPENDONLY
) && !(ioflag
& FAPPEND
) &&
715 (uio
->uio_loffset
< zp
->z_size
))) {
717 return (SET_ERROR(EPERM
));
720 zilog
= zfsvfs
->z_log
;
723 * Validate file offset
725 woff
= ioflag
& FAPPEND
? zp
->z_size
: uio
->uio_loffset
;
728 return (SET_ERROR(EINVAL
));
732 * Check for mandatory locks before calling zfs_range_lock()
733 * in order to prevent a deadlock with locks set via fcntl().
735 if (MANDMODE((mode_t
)zp
->z_mode
) &&
736 (error
= chklock(vp
, FWRITE
, woff
, n
, uio
->uio_fmode
, ct
)) != 0) {
 * Pre-fault the pages to ensure slow (eg NFS) pages
 * don't hold up txg.
 * Skip this if uio contains loaned arc_buf.
746 if ((uio
->uio_extflg
== UIO_XUIO
) &&
747 (((xuio_t
*)uio
)->xu_type
== UIOTYPE_ZEROCOPY
))
748 xuio
= (xuio_t
*)uio
;
750 uio_prefaultpages(MIN(n
, max_blksz
), uio
);
753 * If in append mode, set the io offset pointer to eof.
755 if (ioflag
& FAPPEND
) {
757 * Obtain an appending range lock to guarantee file append
758 * semantics. We reset the write offset once we have the lock.
760 rl
= zfs_range_lock(zp
, 0, n
, RL_APPEND
);
762 if (rl
->r_len
== UINT64_MAX
) {
764 * We overlocked the file because this write will cause
765 * the file block size to increase.
766 * Note that zp_size cannot change with this lock held.
770 uio
->uio_loffset
= woff
;
773 * Note that if the file block size will change as a result of
774 * this write, then this range lock will lock the entire file
775 * so that we can re-write the block safely.
777 rl
= zfs_range_lock(zp
, woff
, n
, RL_WRITER
);
781 zfs_range_unlock(rl
);
783 return (SET_ERROR(EFBIG
));
786 if ((woff
+ n
) > limit
|| woff
> (limit
- n
))
789 /* Will this write extend the file length? */
790 write_eof
= (woff
+ n
> zp
->z_size
);
792 end_size
= MAX(zp
->z_size
, woff
+ n
);
795 * Write the file in reasonable size chunks. Each chunk is written
796 * in a separate transaction; this keeps the intent log records small
797 * and allows us to do more fine-grained space accounting.
801 woff
= uio
->uio_loffset
;
802 if (zfs_owner_overquota(zfsvfs
, zp
, B_FALSE
) ||
803 zfs_owner_overquota(zfsvfs
, zp
, B_TRUE
)) {
805 dmu_return_arcbuf(abuf
);
806 error
= SET_ERROR(EDQUOT
);
810 if (xuio
&& abuf
== NULL
) {
811 ASSERT(i_iov
< iovcnt
);
813 abuf
= dmu_xuio_arcbuf(xuio
, i_iov
);
814 dmu_xuio_clear(xuio
, i_iov
);
815 DTRACE_PROBE3(zfs_cp_write
, int, i_iov
,
816 iovec_t
*, aiov
, arc_buf_t
*, abuf
);
817 ASSERT((aiov
->iov_base
== abuf
->b_data
) ||
818 ((char *)aiov
->iov_base
- (char *)abuf
->b_data
+
819 aiov
->iov_len
== arc_buf_size(abuf
)));
821 } else if (abuf
== NULL
&& n
>= max_blksz
&&
822 woff
>= zp
->z_size
&&
823 P2PHASE(woff
, max_blksz
) == 0 &&
824 zp
->z_blksz
== max_blksz
) {
826 * This write covers a full block. "Borrow" a buffer
827 * from the dmu so that we can fill it before we enter
828 * a transaction. This avoids the possibility of
829 * holding up the transaction if the data copy hangs
830 * up on a pagefault (e.g., from an NFS server mapping).
834 abuf
= dmu_request_arcbuf(sa_get_db(zp
->z_sa_hdl
),
836 ASSERT(abuf
!= NULL
);
837 ASSERT(arc_buf_size(abuf
) == max_blksz
);
838 if (error
= uiocopy(abuf
->b_data
, max_blksz
,
839 UIO_WRITE
, uio
, &cbytes
)) {
840 dmu_return_arcbuf(abuf
);
843 ASSERT(cbytes
== max_blksz
);
847 * Start a transaction.
849 tx
= dmu_tx_create(zfsvfs
->z_os
);
850 dmu_tx_hold_sa(tx
, zp
->z_sa_hdl
, B_FALSE
);
851 dmu_tx_hold_write(tx
, zp
->z_id
, woff
, MIN(n
, max_blksz
));
852 zfs_sa_upgrade_txholds(tx
, zp
);
853 error
= dmu_tx_assign(tx
, TXG_WAIT
);
857 dmu_return_arcbuf(abuf
);
862 * If zfs_range_lock() over-locked we grow the blocksize
863 * and then reduce the lock range. This will only happen
864 * on the first iteration since zfs_range_reduce() will
865 * shrink down r_len to the appropriate size.
867 if (rl
->r_len
== UINT64_MAX
) {
870 if (zp
->z_blksz
> max_blksz
) {
872 * File's blocksize is already larger than the
873 * "recordsize" property. Only let it grow to
874 * the next power of 2.
876 ASSERT(!ISP2(zp
->z_blksz
));
877 new_blksz
= MIN(end_size
,
878 1 << highbit64(zp
->z_blksz
));
880 new_blksz
= MIN(end_size
, max_blksz
);
882 zfs_grow_blocksize(zp
, new_blksz
, tx
);
883 zfs_range_reduce(rl
, woff
, n
);
887 * XXX - should we really limit each write to z_max_blksz?
888 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
890 nbytes
= MIN(n
, max_blksz
- P2PHASE(woff
, max_blksz
));
893 tx_bytes
= uio
->uio_resid
;
894 error
= dmu_write_uio_dbuf(sa_get_db(zp
->z_sa_hdl
),
896 tx_bytes
-= uio
->uio_resid
;
899 ASSERT(xuio
== NULL
|| tx_bytes
== aiov
->iov_len
);
901 * If this is not a full block write, but we are
902 * extending the file past EOF and this data starts
903 * block-aligned, use assign_arcbuf(). Otherwise,
904 * write via dmu_write().
906 if (tx_bytes
< max_blksz
&& (!write_eof
||
907 aiov
->iov_base
!= abuf
->b_data
)) {
909 dmu_write(zfsvfs
->z_os
, zp
->z_id
, woff
,
910 aiov
->iov_len
, aiov
->iov_base
, tx
);
911 dmu_return_arcbuf(abuf
);
912 xuio_stat_wbuf_copied();
914 ASSERT(xuio
|| tx_bytes
== max_blksz
);
915 dmu_assign_arcbuf(sa_get_db(zp
->z_sa_hdl
),
918 ASSERT(tx_bytes
<= uio
->uio_resid
);
919 uioskip(uio
, tx_bytes
);
921 if (tx_bytes
&& vn_has_cached_data(vp
)) {
922 update_pages(vp
, woff
,
923 tx_bytes
, zfsvfs
->z_os
, zp
->z_id
);
927 * If we made no progress, we're done. If we made even
928 * partial progress, update the znode and ZIL accordingly.
931 (void) sa_update(zp
->z_sa_hdl
, SA_ZPL_SIZE(zfsvfs
),
932 (void *)&zp
->z_size
, sizeof (uint64_t), tx
);
 * Clear Set-UID/Set-GID bits on successful write if not
 * privileged and at least one of the execute bits is set.
 *
 * It would be nice to do this after all writes have
 * been done, but that would still expose the ISUID/ISGID
 * to another app after the partial write is committed.
 *
 * Note: we don't call zfs_fuid_map_id() here because
 * user 0 is not an ephemeral uid.
949 mutex_enter(&zp
->z_acl_lock
);
950 if ((zp
->z_mode
& (S_IXUSR
| (S_IXUSR
>> 3) |
951 (S_IXUSR
>> 6))) != 0 &&
952 (zp
->z_mode
& (S_ISUID
| S_ISGID
)) != 0 &&
953 secpolicy_vnode_setid_retain(cr
,
954 (zp
->z_mode
& S_ISUID
) != 0 && zp
->z_uid
== 0) != 0) {
956 zp
->z_mode
&= ~(S_ISUID
| S_ISGID
);
957 newmode
= zp
->z_mode
;
958 (void) sa_update(zp
->z_sa_hdl
, SA_ZPL_MODE(zfsvfs
),
959 (void *)&newmode
, sizeof (uint64_t), tx
);
961 mutex_exit(&zp
->z_acl_lock
);
963 zfs_tstamp_update_setup(zp
, CONTENT_MODIFIED
, mtime
, ctime
,
967 * Update the file size (zp_size) if it has changed;
968 * account for possible concurrent updates.
970 while ((end_size
= zp
->z_size
) < uio
->uio_loffset
) {
971 (void) atomic_cas_64(&zp
->z_size
, end_size
,
976 * If we are replaying and eof is non zero then force
977 * the file size to the specified eof. Note, there's no
978 * concurrency during replay.
980 if (zfsvfs
->z_replay
&& zfsvfs
->z_replay_eof
!= 0)
981 zp
->z_size
= zfsvfs
->z_replay_eof
;
983 error
= sa_bulk_update(zp
->z_sa_hdl
, bulk
, count
, tx
);
985 zfs_log_write(zilog
, tx
, TX_WRITE
, zp
, woff
, tx_bytes
, ioflag
);
990 ASSERT(tx_bytes
== nbytes
);
994 uio_prefaultpages(MIN(n
, max_blksz
), uio
);
997 zfs_range_unlock(rl
);
1000 * If we're in replay mode, or we made no progress, return error.
1001 * Otherwise, it's at least a partial write, so it's successful.
1003 if (zfsvfs
->z_replay
|| uio
->uio_resid
== start_resid
) {
1008 if (ioflag
& (FSYNC
| FDSYNC
) ||
1009 zfsvfs
->z_os
->os_sync
== ZFS_SYNC_ALWAYS
)
1010 zil_commit(zilog
, zp
->z_id
);
zfs_get_done(zgd_t *zgd, int error)
{
    znode_t *zp = zgd->zgd_private;
    objset_t *os = zp->z_zfsvfs->z_os;

    dmu_buf_rele(zgd->zgd_db, zgd);

    zfs_range_unlock(zgd->zgd_rl);

    /*
     * Release the vnode asynchronously as we currently have the
     * txg stopped from syncing.
     */
    VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));

    if (error == 0 && zgd->zgd_bp)
        zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

    kmem_free(zgd, sizeof (zgd_t));
static int zil_fault_io = 0;

/*
 * Get data to generate a TX_WRITE intent log record.
 */
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
    zfsvfs_t *zfsvfs = arg;
    objset_t *os = zfsvfs->z_os;
    uint64_t object = lr->lr_foid;
    uint64_t offset = lr->lr_offset;
    uint64_t size = lr->lr_length;

    ASSERT(zio != NULL);

    /* Nothing to do if the file has been removed */
    if (zfs_zget(zfsvfs, object, &zp) != 0)
        return (SET_ERROR(ENOENT));
    if (zp->z_unlinked) {
        /*
         * Release the vnode asynchronously as we currently have the
         * txg stopped from syncing.
         */
        VN_RELE_ASYNC(ZTOV(zp),
            dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
        return (SET_ERROR(ENOENT));
    }

    zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
    zgd->zgd_zilog = zfsvfs->z_log;
    zgd->zgd_private = zp;

    /*
     * Write records come in two flavors: immediate and indirect.
     * For small writes it's cheaper to store the data with the
     * log record (immediate); for large writes it's cheaper to
     * sync the data and get a pointer to it (indirect) so that
     * we don't have to write the data twice.
     */
    if (buf != NULL) { /* immediate write */
        zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
        /* test for truncation needs to be done while range locked */
        if (offset >= zp->z_size) {
            error = SET_ERROR(ENOENT);
            error = dmu_read(os, object, offset, size, buf,
                DMU_READ_NO_PREFETCH);
        ASSERT(error == 0 || error == ENOENT);
    } else { /* indirect write */
        /*
         * Have to lock the whole block to ensure when it's
         * written out and its checksum is being calculated
         * that no one can change the data.  We need to re-check
         * blocksize after we get the lock in case it's changed!
         */
        blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;

        zgd->zgd_rl = zfs_range_lock(zp, offset, size,
        if (zp->z_blksz == size)
            zfs_range_unlock(zgd->zgd_rl);

        /* test for truncation needs to be done while range locked */
        if (lr->lr_offset >= zp->z_size)
            error = SET_ERROR(ENOENT);
            error = SET_ERROR(EIO);
        error = dmu_buf_hold(os, object, offset, zgd, &db,
            DMU_READ_NO_PREFETCH);
            blkptr_t *bp = &lr->lr_blkptr;

            ASSERT(db->db_offset == offset);
            ASSERT(db->db_size == size);

            error = dmu_sync(zio, lr->lr_common.lrc_txg,

            ASSERT(error || lr->lr_length <= size);

            /*
             * On success, we need to wait for the write I/O
             * initiated by dmu_sync() to complete before we can
             * release this dbuf.  We will finish everything up
             * in the zfs_get_done() callback.
             */
            if (error == EALREADY) {
                lr->lr_common.lrc_txtype = TX_WRITE2;

    zfs_get_done(zgd, error);
zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
    caller_context_t *ct)
{
    znode_t *zp = VTOZ(vp);
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;

    if (flag & V_ACE_MASK)
        error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
        error = zfs_zaccess_rwx(zp, mode, flag, cr);

/*
 * If vnode is for a device return a specfs vnode instead.
 */
specvp_check(vnode_t **vpp, cred_t *cr)
{
    if (IS_DEVVP(*vpp)) {

        svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
            error = SET_ERROR(ENOSYS);
/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held vnode reference for it.
 *
 *      IN:     dvp     - vnode of directory to search.
 *              nm      - name of entry to lookup.
 *              pnp     - full pathname to lookup [UNUSED].
 *              flags   - LOOKUP_XATTR set if looking for an attribute.
 *              rdir    - root directory vnode [UNUSED].
 *              cr      - credentials of caller.
 *              ct      - caller context
 *              direntflags - directory lookup flags
 *              realpnp - returned pathname.
 *
 *      OUT:    vpp     - vnode of located entry, NULL if not found.
 *
 *      RETURN: 0 on success, error code on failure.
 */
1229 zfs_lookup(vnode_t
*dvp
, char *nm
, vnode_t
**vpp
, struct pathname
*pnp
,
1230 int flags
, vnode_t
*rdir
, cred_t
*cr
, caller_context_t
*ct
,
1231 int *direntflags
, pathname_t
*realpnp
)
1233 znode_t
*zdp
= VTOZ(dvp
);
1234 zfsvfs_t
*zfsvfs
= zdp
->z_zfsvfs
;
1238 * Fast path lookup, however we must skip DNLC lookup
1239 * for case folding or normalizing lookups because the
1240 * DNLC code only stores the passed in name. This means
1241 * creating 'a' and removing 'A' on a case insensitive
1242 * file system would work, but DNLC still thinks 'a'
1243 * exists and won't let you create it again on the next
1244 * pass through fast path.
1246 if (!(flags
& (LOOKUP_XATTR
| FIGNORECASE
))) {
1248 if (dvp
->v_type
!= VDIR
) {
1249 return (SET_ERROR(ENOTDIR
));
1250 } else if (zdp
->z_sa_hdl
== NULL
) {
1251 return (SET_ERROR(EIO
));
1254 if (nm
[0] == 0 || (nm
[0] == '.' && nm
[1] == '\0')) {
1255 error
= zfs_fastaccesschk_execute(zdp
, cr
);
1262 } else if (!zdp
->z_zfsvfs
->z_norm
&&
1263 (zdp
->z_zfsvfs
->z_case
== ZFS_CASE_SENSITIVE
)) {
1265 vnode_t
*tvp
= dnlc_lookup(dvp
, nm
);
1268 error
= zfs_fastaccesschk_execute(zdp
, cr
);
1273 if (tvp
== DNLC_NO_VNODE
) {
1275 return (SET_ERROR(ENOENT
));
1278 return (specvp_check(vpp
, cr
));
1284 DTRACE_PROBE2(zfs__fastpath__lookup__miss
, vnode_t
*, dvp
, char *, nm
);
1291 if (flags
& LOOKUP_XATTR
) {
1293 * If the xattr property is off, refuse the lookup request.
1295 if (!(zfsvfs
->z_vfs
->vfs_flag
& VFS_XATTR
)) {
1297 return (SET_ERROR(EINVAL
));
1301 * We don't allow recursive attributes..
1302 * Maybe someday we will.
1304 if (zdp
->z_pflags
& ZFS_XATTR
) {
1306 return (SET_ERROR(EINVAL
));
1309 if (error
= zfs_get_xattrdir(VTOZ(dvp
), vpp
, cr
, flags
)) {
1315 * Do we have permission to get into attribute directory?
1318 if (error
= zfs_zaccess(VTOZ(*vpp
), ACE_EXECUTE
, 0,
1328 if (dvp
->v_type
!= VDIR
) {
1330 return (SET_ERROR(ENOTDIR
));
1334 * Check accessibility of directory.
1337 if (error
= zfs_zaccess(zdp
, ACE_EXECUTE
, 0, B_FALSE
, cr
)) {
1342 if (zfsvfs
->z_utf8
&& u8_validate(nm
, strlen(nm
),
1343 NULL
, U8_VALIDATE_ENTIRE
, &error
) < 0) {
1345 return (SET_ERROR(EILSEQ
));
1348 error
= zfs_dirlook(zdp
, nm
, vpp
, flags
, direntflags
, realpnp
);
1350 error
= specvp_check(vpp
, cr
);
/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the vp of the created or trunc'd file.
 *
 *      IN:     dvp     - vnode of directory to put new file entry in.
 *              name    - name of new file entry.
 *              vap     - attributes of new file.
 *              excl    - flag indicating exclusive or non-exclusive mode.
 *              mode    - mode to open file with.
 *              cr      - credentials of caller.
 *              flag    - large file flag [UNUSED].
 *              ct      - caller context
 *              vsecp   - ACL to be set
 *
 *      OUT:    vpp     - vnode of created or trunc'd entry.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *      dvp - ctime|mtime updated if new entry created
 *       vp - ctime|mtime always, atime if new
 */
1382 zfs_create(vnode_t
*dvp
, char *name
, vattr_t
*vap
, vcexcl_t excl
,
1383 int mode
, vnode_t
**vpp
, cred_t
*cr
, int flag
, caller_context_t
*ct
,
1386 znode_t
*zp
, *dzp
= VTOZ(dvp
);
1387 zfsvfs_t
*zfsvfs
= dzp
->z_zfsvfs
;
1395 gid_t gid
= crgetgid(cr
);
1396 zfs_acl_ids_t acl_ids
;
1397 boolean_t fuid_dirtied
;
1398 boolean_t have_acl
= B_FALSE
;
1399 boolean_t waited
= B_FALSE
;
1402 * If we have an ephemeral id, ACL, or XVATTR then
1403 * make sure file system is at proper version
1406 ksid
= crgetsid(cr
, KSID_OWNER
);
1408 uid
= ksid_getid(ksid
);
1412 if (zfsvfs
->z_use_fuids
== B_FALSE
&&
1413 (vsecp
|| (vap
->va_mask
& AT_XVATTR
) ||
1414 IS_EPHEMERAL(uid
) || IS_EPHEMERAL(gid
)))
1415 return (SET_ERROR(EINVAL
));
1420 zilog
= zfsvfs
->z_log
;
1422 if (zfsvfs
->z_utf8
&& u8_validate(name
, strlen(name
),
1423 NULL
, U8_VALIDATE_ENTIRE
, &error
) < 0) {
1425 return (SET_ERROR(EILSEQ
));
1428 if (vap
->va_mask
& AT_XVATTR
) {
1429 if ((error
= secpolicy_xvattr((xvattr_t
*)vap
,
1430 crgetuid(cr
), cr
, vap
->va_type
)) != 0) {
1438 if ((vap
->va_mode
& VSVTX
) && secpolicy_vnode_stky_modify(cr
))
1439 vap
->va_mode
&= ~VSVTX
;
1441 if (*name
== '\0') {
1443 * Null component name refers to the directory itself.
1450 /* possible VN_HOLD(zp) */
1453 if (flag
& FIGNORECASE
)
1456 error
= zfs_dirent_lock(&dl
, dzp
, name
, &zp
, zflg
,
1460 zfs_acl_ids_free(&acl_ids
);
1461 if (strcmp(name
, "..") == 0)
1462 error
= SET_ERROR(EISDIR
);
1472 * Create a new file object and update the directory
1475 if (error
= zfs_zaccess(dzp
, ACE_ADD_FILE
, 0, B_FALSE
, cr
)) {
1477 zfs_acl_ids_free(&acl_ids
);
1482 * We only support the creation of regular files in
1483 * extended attribute directories.
1486 if ((dzp
->z_pflags
& ZFS_XATTR
) &&
1487 (vap
->va_type
!= VREG
)) {
1489 zfs_acl_ids_free(&acl_ids
);
1490 error
= SET_ERROR(EINVAL
);
1494 if (!have_acl
&& (error
= zfs_acl_ids_create(dzp
, 0, vap
,
1495 cr
, vsecp
, &acl_ids
)) != 0)
1499 if (zfs_acl_ids_overquota(zfsvfs
, &acl_ids
)) {
1500 zfs_acl_ids_free(&acl_ids
);
1501 error
= SET_ERROR(EDQUOT
);
1505 tx
= dmu_tx_create(os
);
1507 dmu_tx_hold_sa_create(tx
, acl_ids
.z_aclp
->z_acl_bytes
+
1508 ZFS_SA_BASE_ATTR_SIZE
);
1510 fuid_dirtied
= zfsvfs
->z_fuid_dirty
;
1512 zfs_fuid_txhold(zfsvfs
, tx
);
1513 dmu_tx_hold_zap(tx
, dzp
->z_id
, TRUE
, name
);
1514 dmu_tx_hold_sa(tx
, dzp
->z_sa_hdl
, B_FALSE
);
1515 if (!zfsvfs
->z_use_sa
&&
1516 acl_ids
.z_aclp
->z_acl_bytes
> ZFS_ACE_SPACE
) {
1517 dmu_tx_hold_write(tx
, DMU_NEW_OBJECT
,
1518 0, acl_ids
.z_aclp
->z_acl_bytes
);
1520 error
= dmu_tx_assign(tx
, waited
? TXG_WAITED
: TXG_NOWAIT
);
1522 zfs_dirent_unlock(dl
);
1523 if (error
== ERESTART
) {
1529 zfs_acl_ids_free(&acl_ids
);
1534 zfs_mknode(dzp
, vap
, tx
, cr
, 0, &zp
, &acl_ids
);
1537 zfs_fuid_sync(zfsvfs
, tx
);
1539 (void) zfs_link_create(dl
, zp
, tx
, ZNEW
);
1540 txtype
= zfs_log_create_txtype(Z_FILE
, vsecp
, vap
);
1541 if (flag
& FIGNORECASE
)
1543 zfs_log_create(zilog
, tx
, txtype
, dzp
, zp
, name
,
1544 vsecp
, acl_ids
.z_fuidp
, vap
);
1545 zfs_acl_ids_free(&acl_ids
);
1548 int aflags
= (flag
& FAPPEND
) ? V_APPEND
: 0;
1551 zfs_acl_ids_free(&acl_ids
);
1555 * A directory entry already exists for this name.
1558 * Can't truncate an existing file if in exclusive mode.
1561 error
= SET_ERROR(EEXIST
);
1565 * Can't open a directory for writing.
1567 if ((ZTOV(zp
)->v_type
== VDIR
) && (mode
& S_IWRITE
)) {
1568 error
= SET_ERROR(EISDIR
);
1572 * Verify requested access to file.
1574 if (mode
&& (error
= zfs_zaccess_rwx(zp
, mode
, aflags
, cr
))) {
1578 mutex_enter(&dzp
->z_lock
);
1580 mutex_exit(&dzp
->z_lock
);
1583 * Truncate regular files if requested.
1585 if ((ZTOV(zp
)->v_type
== VREG
) &&
1586 (vap
->va_mask
& AT_SIZE
) && (vap
->va_size
== 0)) {
1587 /* we can't hold any locks when calling zfs_freesp() */
1588 zfs_dirent_unlock(dl
);
1590 error
= zfs_freesp(zp
, 0, 0, mode
, TRUE
);
1592 vnevent_create(ZTOV(zp
), ct
);
1599 zfs_dirent_unlock(dl
);
1606 error
= specvp_check(vpp
, cr
);
1609 if (zfsvfs
->z_os
->os_sync
== ZFS_SYNC_ALWAYS
)
1610 zil_commit(zilog
, 0);
/*
 * Remove an entry from a directory.
 *
 *      IN:     dvp     - vnode of directory to remove entry from.
 *              name    - name of entry to remove.
 *              cr      - credentials of caller.
 *              ct      - caller context
 *              flags   - case flags
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *       vp - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;
1636 zfs_remove(vnode_t
*dvp
, char *name
, cred_t
*cr
, caller_context_t
*ct
,
1639 znode_t
*zp
, *dzp
= VTOZ(dvp
);
1642 zfsvfs_t
*zfsvfs
= dzp
->z_zfsvfs
;
1644 uint64_t acl_obj
, xattr_obj
;
1645 uint64_t xattr_obj_unlinked
= 0;
1649 boolean_t may_delete_now
, delete_now
= FALSE
;
1650 boolean_t unlinked
, toobig
= FALSE
;
1652 pathname_t
*realnmp
= NULL
;
1656 boolean_t waited
= B_FALSE
;
1660 zilog
= zfsvfs
->z_log
;
1662 if (flags
& FIGNORECASE
) {
1672 * Attempt to lock directory; fail if entry doesn't exist.
1674 if (error
= zfs_dirent_lock(&dl
, dzp
, name
, &zp
, zflg
,
1684 if (error
= zfs_zaccess_delete(dzp
, zp
, cr
)) {
1689 * Need to use rmdir for removing directories.
1691 if (vp
->v_type
== VDIR
) {
1692 error
= SET_ERROR(EPERM
);
1696 vnevent_remove(vp
, dvp
, name
, ct
);
1699 dnlc_remove(dvp
, realnmp
->pn_buf
);
1701 dnlc_remove(dvp
, name
);
1703 mutex_enter(&vp
->v_lock
);
1704 may_delete_now
= vp
->v_count
== 1 && !vn_has_cached_data(vp
);
1705 mutex_exit(&vp
->v_lock
);
1708 * We may delete the znode now, or we may put it in the unlinked set;
1709 * it depends on whether we're the last link, and on whether there are
1710 * other holds on the vnode. So we dmu_tx_hold() the right things to
1711 * allow for either case.
1714 tx
= dmu_tx_create(zfsvfs
->z_os
);
1715 dmu_tx_hold_zap(tx
, dzp
->z_id
, FALSE
, name
);
1716 dmu_tx_hold_sa(tx
, zp
->z_sa_hdl
, B_FALSE
);
1717 zfs_sa_upgrade_txholds(tx
, zp
);
1718 zfs_sa_upgrade_txholds(tx
, dzp
);
1719 if (may_delete_now
) {
1721 zp
->z_size
> zp
->z_blksz
* DMU_MAX_DELETEBLKCNT
;
1722 /* if the file is too big, only hold_free a token amount */
1723 dmu_tx_hold_free(tx
, zp
->z_id
, 0,
1724 (toobig
? DMU_MAX_ACCESS
: DMU_OBJECT_END
));
1727 /* are there any extended attributes? */
1728 error
= sa_lookup(zp
->z_sa_hdl
, SA_ZPL_XATTR(zfsvfs
),
1729 &xattr_obj
, sizeof (xattr_obj
));
1730 if (error
== 0 && xattr_obj
) {
1731 error
= zfs_zget(zfsvfs
, xattr_obj
, &xzp
);
1733 dmu_tx_hold_sa(tx
, zp
->z_sa_hdl
, B_TRUE
);
1734 dmu_tx_hold_sa(tx
, xzp
->z_sa_hdl
, B_FALSE
);
1737 mutex_enter(&zp
->z_lock
);
1738 if ((acl_obj
= zfs_external_acl(zp
)) != 0 && may_delete_now
)
1739 dmu_tx_hold_free(tx
, acl_obj
, 0, DMU_OBJECT_END
);
1740 mutex_exit(&zp
->z_lock
);
1742 /* charge as an update -- would be nice not to charge at all */
1743 dmu_tx_hold_zap(tx
, zfsvfs
->z_unlinkedobj
, FALSE
, NULL
);
1746 * Mark this transaction as typically resulting in a net free of space
1748 dmu_tx_mark_netfree(tx
);
1750 error
= dmu_tx_assign(tx
, waited
? TXG_WAITED
: TXG_NOWAIT
);
1752 zfs_dirent_unlock(dl
);
1756 if (error
== ERESTART
) {
1770 * Remove the directory entry.
1772 error
= zfs_link_destroy(dl
, zp
, tx
, zflg
, &unlinked
);
1781 * Hold z_lock so that we can make sure that the ACL obj
1782 * hasn't changed. Could have been deleted due to
1785 mutex_enter(&zp
->z_lock
);
1786 mutex_enter(&vp
->v_lock
);
1787 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_XATTR(zfsvfs
),
1788 &xattr_obj_unlinked
, sizeof (xattr_obj_unlinked
));
1789 delete_now
= may_delete_now
&& !toobig
&&
1790 vp
->v_count
== 1 && !vn_has_cached_data(vp
) &&
1791 xattr_obj
== xattr_obj_unlinked
&& zfs_external_acl(zp
) ==
1793 mutex_exit(&vp
->v_lock
);
1797 if (xattr_obj_unlinked
) {
1798 ASSERT3U(xzp
->z_links
, ==, 2);
1799 mutex_enter(&xzp
->z_lock
);
1800 xzp
->z_unlinked
= 1;
1802 error
= sa_update(xzp
->z_sa_hdl
, SA_ZPL_LINKS(zfsvfs
),
1803 &xzp
->z_links
, sizeof (xzp
->z_links
), tx
);
1804 ASSERT3U(error
, ==, 0);
1805 mutex_exit(&xzp
->z_lock
);
1806 zfs_unlinked_add(xzp
, tx
);
1809 error
= sa_remove(zp
->z_sa_hdl
,
1810 SA_ZPL_XATTR(zfsvfs
), tx
);
1812 error
= sa_update(zp
->z_sa_hdl
,
1813 SA_ZPL_XATTR(zfsvfs
), &null_xattr
,
1814 sizeof (uint64_t), tx
);
1817 mutex_enter(&vp
->v_lock
);
1819 ASSERT0(vp
->v_count
);
1820 mutex_exit(&vp
->v_lock
);
1821 mutex_exit(&zp
->z_lock
);
1822 zfs_znode_delete(zp
, tx
);
1823 } else if (unlinked
) {
1824 mutex_exit(&zp
->z_lock
);
1825 zfs_unlinked_add(zp
, tx
);
1829 if (flags
& FIGNORECASE
)
1831 zfs_log_remove(zilog
, tx
, txtype
, dzp
, name
, obj
);
1838 zfs_dirent_unlock(dl
);
1845 if (zfsvfs
->z_os
->os_sync
== ZFS_SYNC_ALWAYS
)
1846 zil_commit(zilog
, 0);
/*
 * Create a new directory and insert it into dvp using the name
 * provided.  Return a pointer to the inserted directory.
 *
 *      IN:     dvp     - vnode of directory to add subdir to.
 *              dirname - name of new directory.
 *              vap     - attributes of new directory.
 *              cr      - credentials of caller.
 *              ct      - caller context
 *              flags   - case flags
 *              vsecp   - ACL to be set
 *
 *      OUT:    vpp     - vnode of created directory.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *      dvp - ctime|mtime updated
 *       vp - ctime|mtime|atime updated
 */
1874 zfs_mkdir(vnode_t
*dvp
, char *dirname
, vattr_t
*vap
, vnode_t
**vpp
, cred_t
*cr
,
1875 caller_context_t
*ct
, int flags
, vsecattr_t
*vsecp
)
1877 znode_t
*zp
, *dzp
= VTOZ(dvp
);
1878 zfsvfs_t
*zfsvfs
= dzp
->z_zfsvfs
;
1887 gid_t gid
= crgetgid(cr
);
1888 zfs_acl_ids_t acl_ids
;
1889 boolean_t fuid_dirtied
;
1890 boolean_t waited
= B_FALSE
;
1892 ASSERT(vap
->va_type
== VDIR
);
1895 * If we have an ephemeral id, ACL, or XVATTR then
1896 * make sure file system is at proper version
1899 ksid
= crgetsid(cr
, KSID_OWNER
);
1901 uid
= ksid_getid(ksid
);
1904 if (zfsvfs
->z_use_fuids
== B_FALSE
&&
1905 (vsecp
|| (vap
->va_mask
& AT_XVATTR
) ||
1906 IS_EPHEMERAL(uid
) || IS_EPHEMERAL(gid
)))
1907 return (SET_ERROR(EINVAL
));
1911 zilog
= zfsvfs
->z_log
;
1913 if (dzp
->z_pflags
& ZFS_XATTR
) {
1915 return (SET_ERROR(EINVAL
));
1918 if (zfsvfs
->z_utf8
&& u8_validate(dirname
,
1919 strlen(dirname
), NULL
, U8_VALIDATE_ENTIRE
, &error
) < 0) {
1921 return (SET_ERROR(EILSEQ
));
1923 if (flags
& FIGNORECASE
)
1926 if (vap
->va_mask
& AT_XVATTR
) {
1927 if ((error
= secpolicy_xvattr((xvattr_t
*)vap
,
1928 crgetuid(cr
), cr
, vap
->va_type
)) != 0) {
1934 if ((error
= zfs_acl_ids_create(dzp
, 0, vap
, cr
,
1935 vsecp
, &acl_ids
)) != 0) {
1940 * First make sure the new directory doesn't exist.
1942 * Existence is checked first to make sure we don't return
1943 * EACCES instead of EEXIST which can cause some applications
1949 if (error
= zfs_dirent_lock(&dl
, dzp
, dirname
, &zp
, zf
,
1951 zfs_acl_ids_free(&acl_ids
);
1956 if (error
= zfs_zaccess(dzp
, ACE_ADD_SUBDIRECTORY
, 0, B_FALSE
, cr
)) {
1957 zfs_acl_ids_free(&acl_ids
);
1958 zfs_dirent_unlock(dl
);
1963 if (zfs_acl_ids_overquota(zfsvfs
, &acl_ids
)) {
1964 zfs_acl_ids_free(&acl_ids
);
1965 zfs_dirent_unlock(dl
);
1967 return (SET_ERROR(EDQUOT
));
1971 * Add a new entry to the directory.
1973 tx
= dmu_tx_create(zfsvfs
->z_os
);
1974 dmu_tx_hold_zap(tx
, dzp
->z_id
, TRUE
, dirname
);
1975 dmu_tx_hold_zap(tx
, DMU_NEW_OBJECT
, FALSE
, NULL
);
1976 fuid_dirtied
= zfsvfs
->z_fuid_dirty
;
1978 zfs_fuid_txhold(zfsvfs
, tx
);
1979 if (!zfsvfs
->z_use_sa
&& acl_ids
.z_aclp
->z_acl_bytes
> ZFS_ACE_SPACE
) {
1980 dmu_tx_hold_write(tx
, DMU_NEW_OBJECT
, 0,
1981 acl_ids
.z_aclp
->z_acl_bytes
);
1984 dmu_tx_hold_sa_create(tx
, acl_ids
.z_aclp
->z_acl_bytes
+
1985 ZFS_SA_BASE_ATTR_SIZE
);
1987 error
= dmu_tx_assign(tx
, waited
? TXG_WAITED
: TXG_NOWAIT
);
1989 zfs_dirent_unlock(dl
);
1990 if (error
== ERESTART
) {
1996 zfs_acl_ids_free(&acl_ids
);
2005 zfs_mknode(dzp
, vap
, tx
, cr
, 0, &zp
, &acl_ids
);
2008 zfs_fuid_sync(zfsvfs
, tx
);
2011 * Now put new name in parent dir.
2013 (void) zfs_link_create(dl
, zp
, tx
, ZNEW
);
2017 txtype
= zfs_log_create_txtype(Z_DIR
, vsecp
, vap
);
2018 if (flags
& FIGNORECASE
)
2020 zfs_log_create(zilog
, tx
, txtype
, dzp
, zp
, dirname
, vsecp
,
2021 acl_ids
.z_fuidp
, vap
);
2023 zfs_acl_ids_free(&acl_ids
);
2027 zfs_dirent_unlock(dl
);
2029 if (zfsvfs
->z_os
->os_sync
== ZFS_SYNC_ALWAYS
)
2030 zil_commit(zilog
, 0);
/*
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *      IN:     dvp     - vnode of directory to remove from.
 *              name    - name of directory to be removed.
 *              cwd     - vnode of current working directory.
 *              cr      - credentials of caller.
 *              ct      - caller context
 *              flags   - case flags
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *      dvp - ctime|mtime updated
 */
2055 zfs_rmdir(vnode_t
*dvp
, char *name
, vnode_t
*cwd
, cred_t
*cr
,
2056 caller_context_t
*ct
, int flags
)
2058 znode_t
*dzp
= VTOZ(dvp
);
2061 zfsvfs_t
*zfsvfs
= dzp
->z_zfsvfs
;
2067 boolean_t waited
= B_FALSE
;
2071 zilog
= zfsvfs
->z_log
;
2073 if (flags
& FIGNORECASE
)
2079 * Attempt to lock directory; fail if entry doesn't exist.
2081 if (error
= zfs_dirent_lock(&dl
, dzp
, name
, &zp
, zflg
,
2089 if (error
= zfs_zaccess_delete(dzp
, zp
, cr
)) {
2093 if (vp
->v_type
!= VDIR
) {
2094 error
= SET_ERROR(ENOTDIR
);
2099 error
= SET_ERROR(EINVAL
);
2103 vnevent_rmdir(vp
, dvp
, name
, ct
);
 * Grab a lock on the directory to make sure that no one is
 * trying to add (or lookup) entries while we are removing it.
2109 rw_enter(&zp
->z_name_lock
, RW_WRITER
);
2112 * Grab a lock on the parent pointer to make sure we play well
2113 * with the treewalk and directory rename code.
2115 rw_enter(&zp
->z_parent_lock
, RW_WRITER
);
2117 tx
= dmu_tx_create(zfsvfs
->z_os
);
2118 dmu_tx_hold_zap(tx
, dzp
->z_id
, FALSE
, name
);
2119 dmu_tx_hold_sa(tx
, zp
->z_sa_hdl
, B_FALSE
);
2120 dmu_tx_hold_zap(tx
, zfsvfs
->z_unlinkedobj
, FALSE
, NULL
);
2121 zfs_sa_upgrade_txholds(tx
, zp
);
2122 zfs_sa_upgrade_txholds(tx
, dzp
);
2123 dmu_tx_mark_netfree(tx
);
2124 error
= dmu_tx_assign(tx
, waited
? TXG_WAITED
: TXG_NOWAIT
);
2126 rw_exit(&zp
->z_parent_lock
);
2127 rw_exit(&zp
->z_name_lock
);
2128 zfs_dirent_unlock(dl
);
2130 if (error
== ERESTART
) {
2141 error
= zfs_link_destroy(dl
, zp
, tx
, zflg
, NULL
);
2144 uint64_t txtype
= TX_RMDIR
;
2145 if (flags
& FIGNORECASE
)
2147 zfs_log_remove(zilog
, tx
, txtype
, dzp
, name
, ZFS_NO_OBJECT
);
2152 rw_exit(&zp
->z_parent_lock
);
2153 rw_exit(&zp
->z_name_lock
);
2155 zfs_dirent_unlock(dl
);
2159 if (zfsvfs
->z_os
->os_sync
== ZFS_SYNC_ALWAYS
)
2160 zil_commit(zilog
, 0);
/*
 * Read as many directory entries as will fit into the provided
 * buffer from the given directory cursor position (specified in
 * the uio structure).
 *
 *      IN:     vp      - vnode of directory to read.
 *              uio     - structure supplying read location, range info,
 *                        and return buffer.
 *              cr      - credentials of caller.
 *              ct      - caller context
 *              flags   - case flags
 *
 *      OUT:    uio     - updated offset and range, buffer filled.
 *              eofp    - set to true if end-of-file detected.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *      vp - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap are always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
2193 zfs_readdir(vnode_t
*vp
, uio_t
*uio
, cred_t
*cr
, int *eofp
,
2194 caller_context_t
*ct
, int flags
)
2196 znode_t
*zp
= VTOZ(vp
);
2200 zfsvfs_t
*zfsvfs
= zp
->z_zfsvfs
;
2205 zap_attribute_t zap
;
2206 uint_t bytes_wanted
;
2207 uint64_t offset
; /* must be unsigned; checks for < 1 */
2213 boolean_t check_sysattrs
;
2218 if ((error
= sa_lookup(zp
->z_sa_hdl
, SA_ZPL_PARENT(zfsvfs
),
2219 &parent
, sizeof (parent
))) != 0) {
2225 * If we are not given an eof variable,
2232 * Check for valid iov_len.
2234 if (uio
->uio_iov
->iov_len
<= 0) {
2236 return (SET_ERROR(EINVAL
));
2240 * Quit if directory has been removed (posix)
2242 if ((*eofp
= zp
->z_unlinked
) != 0) {
2249 offset
= uio
->uio_loffset
;
2250 prefetch
= zp
->z_zn_prefetch
;
2253 * Initialize the iterator cursor.
2257 * Start iteration from the beginning of the directory.
2259 zap_cursor_init(&zc
, os
, zp
->z_id
);
2262 * The offset is a serialized cursor.
2264 zap_cursor_init_serialized(&zc
, os
, zp
->z_id
, offset
);
2268 * Get space to change directory entries into fs independent format.
2270 iovp
= uio
->uio_iov
;
2271 bytes_wanted
= iovp
->iov_len
;
2272 if (uio
->uio_segflg
!= UIO_SYSSPACE
|| uio
->uio_iovcnt
!= 1) {
2273 bufsize
= bytes_wanted
;
2274 outbuf
= kmem_alloc(bufsize
, KM_SLEEP
);
2275 odp
= (struct dirent64
*)outbuf
;
2277 bufsize
= bytes_wanted
;
2279 odp
= (struct dirent64
*)iovp
->iov_base
;
2281 eodp
= (struct edirent
*)odp
;
2284 * If this VFS supports the system attribute view interface; and
2285 * we're looking at an extended attribute directory; and we care
2286 * about normalization conflicts on this vfs; then we must check
2287 * for normalization conflicts with the sysattr name space.
2289 check_sysattrs
= vfs_has_feature(vp
->v_vfsp
, VFSFT_SYSATTR_VIEWS
) &&
2290 (vp
->v_flag
& V_XATTRDIR
) && zfsvfs
->z_norm
&&
2291 (flags
& V_RDDIR_ENTFLAGS
);
2294 * Transform to file-system independent format
2297 while (outcount
< bytes_wanted
) {
2300 off64_t
*next
= NULL
;
2303 * Special case `.', `..', and `.zfs'.
2306 (void) strcpy(zap
.za_name
, ".");
2307 zap
.za_normalization_conflict
= 0;
2309 } else if (offset
== 1) {
2310 (void) strcpy(zap
.za_name
, "..");
2311 zap
.za_normalization_conflict
= 0;
2313 } else if (offset
== 2 && zfs_show_ctldir(zp
)) {
2314 (void) strcpy(zap
.za_name
, ZFS_CTLDIR_NAME
);
2315 zap
.za_normalization_conflict
= 0;
2316 objnum
= ZFSCTL_INO_ROOT
;
2321 if (error
= zap_cursor_retrieve(&zc
, &zap
)) {
2322 if ((*eofp
= (error
== ENOENT
)) != 0)
2328 if (zap
.za_integer_length
!= 8 ||
2329 zap
.za_num_integers
!= 1) {
2330 cmn_err(CE_WARN
, "zap_readdir: bad directory "
2331 "entry, obj = %lld, offset = %lld\n",
2332 (u_longlong_t
)zp
->z_id
,
2333 (u_longlong_t
)offset
);
2334 error
= SET_ERROR(ENXIO
);
2338 objnum
= ZFS_DIRENT_OBJ(zap
.za_first_integer
);
2340 * MacOS X can extract the object type here such as:
2341 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2344 if (check_sysattrs
&& !zap
.za_normalization_conflict
) {
2345 zap
.za_normalization_conflict
=
2346 xattr_sysattr_casechk(zap
.za_name
);
2350 if (flags
& V_RDDIR_ACCFILTER
) {
2352 * If we have no access at all, don't include
2353 * this entry in the returned information
2356 if (zfs_zget(zp
->z_zfsvfs
, objnum
, &ezp
) != 0)
2358 if (!zfs_has_access(ezp
, cr
)) {
2365 if (flags
& V_RDDIR_ENTFLAGS
)
2366 reclen
= EDIRENT_RECLEN(strlen(zap
.za_name
));
2368 reclen
= DIRENT64_RECLEN(strlen(zap
.za_name
));
2371 * Will this entry fit in the buffer?
2373 if (outcount
+ reclen
> bufsize
) {
2375 * Did we manage to fit anything in the buffer?
2378 error
= SET_ERROR(EINVAL
);
2383 if (flags
& V_RDDIR_ENTFLAGS
) {
2385 * Add extended flag entry:
2387 eodp
->ed_ino
= objnum
;
2388 eodp
->ed_reclen
= reclen
;
2389 /* NOTE: ed_off is the offset for the *next* entry */
2390 next
= &(eodp
->ed_off
);
2391 eodp
->ed_eflags
= zap
.za_normalization_conflict
?
2392 ED_CASE_CONFLICT
: 0;
2393 (void) strncpy(eodp
->ed_name
, zap
.za_name
,
2394 EDIRENT_NAMELEN(reclen
));
2395 eodp
= (edirent_t
*)((intptr_t)eodp
+ reclen
);
2400 odp
->d_ino
= objnum
;
2401 odp
->d_reclen
= reclen
;
2402 /* NOTE: d_off is the offset for the *next* entry */
2403 next
= &(odp
->d_off
);
2404 (void) strncpy(odp
->d_name
, zap
.za_name
,
2405 DIRENT64_NAMELEN(reclen
));
2406 odp
= (dirent64_t
*)((intptr_t)odp
+ reclen
);
2410 ASSERT(outcount
<= bufsize
);
2412 /* Prefetch znode */
2414 dmu_prefetch(os
, objnum
, 0, 0, 0,
2415 ZIO_PRIORITY_SYNC_READ
);
2419 * Move to the next entry, fill in the previous offset.
2421 if (offset
> 2 || (offset
== 2 && !zfs_show_ctldir(zp
))) {
2422 zap_cursor_advance(&zc
);
2423 offset
= zap_cursor_serialize(&zc
);
2430 zp
->z_zn_prefetch
= B_FALSE
; /* a lookup will re-enable pre-fetching */
2432 if (uio
->uio_segflg
== UIO_SYSSPACE
&& uio
->uio_iovcnt
== 1) {
2433 iovp
->iov_base
+= outcount
;
2434 iovp
->iov_len
-= outcount
;
2435 uio
->uio_resid
-= outcount
;
2436 } else if (error
= uiomove(outbuf
, (long)outcount
, UIO_READ
, uio
)) {
2438 * Reset the pointer.
2440 offset
= uio
->uio_loffset
;
2444 zap_cursor_fini(&zc
);
2445 if (uio
->uio_segflg
!= UIO_SYSSPACE
|| uio
->uio_iovcnt
!= 1)
2446 kmem_free(outbuf
, bufsize
);
2448 if (error
== ENOENT
)
2451 ZFS_ACCESSTIME_STAMP(zfsvfs
, zp
);
2453 uio
->uio_loffset
= offset
;
ulong_t zfs_fsync_sync_cnt = 4;

zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
    znode_t *zp = VTOZ(vp);
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;

    /*
     * Regardless of whether this is required for standards conformance,
     * this is the logical behavior when fsync() is called on a file with
     * dirty pages.  We use B_ASYNC since the ZIL transactions are already
     * going to be pushed out as part of the zil_commit().
     */
    if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
        (vp->v_type == VREG) && !(IS_SWAPVP(vp)))
        (void) fop_putpage(vp, 0, (size_t)0, B_ASYNC, cr, ct);

    (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

    if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
        zil_commit(zfsvfs->z_log, zp->z_id);
/*
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *      IN:     vp      - vnode of file.
 *              vap     - va_mask identifies requested attributes.
 *                        If AT_XVATTR set, then optional attrs are requested
 *              flags   - ATTR_NOACLCHECK (CIFS server context)
 *              cr      - credentials of caller.
 *              ct      - caller context
 *
 *      OUT:    vap     - attribute values.
 *
 *      RETURN: 0 (always succeeds).
 */
2505 zfs_getattr(vnode_t
*vp
, vattr_t
*vap
, int flags
, cred_t
*cr
,
2506 caller_context_t
*ct
)
2508 znode_t
*zp
= VTOZ(vp
);
2509 zfsvfs_t
*zfsvfs
= zp
->z_zfsvfs
;
2512 uint64_t mtime
[2], ctime
[2];
2513 xvattr_t
*xvap
= (xvattr_t
*)vap
; /* vap may be an xvattr_t * */
2514 xoptattr_t
*xoap
= NULL
;
2515 boolean_t skipaclchk
= (flags
& ATTR_NOACLCHECK
) ? B_TRUE
: B_FALSE
;
2516 sa_bulk_attr_t bulk
[2];
2522 zfs_fuid_map_ids(zp
, cr
, &vap
->va_uid
, &vap
->va_gid
);
2524 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
, &mtime
, 16);
2525 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
, &ctime
, 16);
2527 if ((error
= sa_bulk_lookup(zp
->z_sa_hdl
, bulk
, count
)) != 0) {
2533 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2534 * Also, if we are the owner don't bother, since owner should
2535 * always be allowed to read basic attributes of file.
2537 if (!(zp
->z_pflags
& ZFS_ACL_TRIVIAL
) &&
2538 (vap
->va_uid
!= crgetuid(cr
))) {
2539 if (error
= zfs_zaccess(zp
, ACE_READ_ATTRIBUTES
, 0,
2547 * Return all attributes. It's cheaper to provide the answer
2548 * than to determine whether we were asked the question.
2551 mutex_enter(&zp
->z_lock
);
2552 vap
->va_type
= vp
->v_type
;
2553 vap
->va_mode
= zp
->z_mode
& MODEMASK
;
2554 vap
->va_fsid
= zp
->z_zfsvfs
->z_vfs
->vfs_dev
;
2555 vap
->va_nodeid
= zp
->z_id
;
2556 if ((vp
->v_flag
& VROOT
) && zfs_show_ctldir(zp
))
2557 links
= zp
->z_links
+ 1;
2559 links
= zp
->z_links
;
2560 vap
->va_nlink
= MIN(links
, UINT32_MAX
); /* nlink_t limit! */
2561 vap
->va_size
= zp
->z_size
;
2562 vap
->va_rdev
= vp
->v_rdev
;
2563 vap
->va_seq
= zp
->z_seq
;
2566 * Add in any requested optional attributes and the create time.
2567 * Also set the corresponding bits in the returned attribute bitmap.
2569 if ((xoap
= xva_getxoptattr(xvap
)) != NULL
&& zfsvfs
->z_use_fuids
) {
2570 if (XVA_ISSET_REQ(xvap
, XAT_ARCHIVE
)) {
2572 ((zp
->z_pflags
& ZFS_ARCHIVE
) != 0);
2573 XVA_SET_RTN(xvap
, XAT_ARCHIVE
);
2576 if (XVA_ISSET_REQ(xvap
, XAT_READONLY
)) {
2577 xoap
->xoa_readonly
=
2578 ((zp
->z_pflags
& ZFS_READONLY
) != 0);
2579 XVA_SET_RTN(xvap
, XAT_READONLY
);
2582 if (XVA_ISSET_REQ(xvap
, XAT_SYSTEM
)) {
2584 ((zp
->z_pflags
& ZFS_SYSTEM
) != 0);
2585 XVA_SET_RTN(xvap
, XAT_SYSTEM
);
2588 if (XVA_ISSET_REQ(xvap
, XAT_HIDDEN
)) {
2590 ((zp
->z_pflags
& ZFS_HIDDEN
) != 0);
2591 XVA_SET_RTN(xvap
, XAT_HIDDEN
);
2594 if (XVA_ISSET_REQ(xvap
, XAT_NOUNLINK
)) {
2595 xoap
->xoa_nounlink
=
2596 ((zp
->z_pflags
& ZFS_NOUNLINK
) != 0);
2597 XVA_SET_RTN(xvap
, XAT_NOUNLINK
);
2600 if (XVA_ISSET_REQ(xvap
, XAT_IMMUTABLE
)) {
2601 xoap
->xoa_immutable
=
2602 ((zp
->z_pflags
& ZFS_IMMUTABLE
) != 0);
2603 XVA_SET_RTN(xvap
, XAT_IMMUTABLE
);
2606 if (XVA_ISSET_REQ(xvap
, XAT_APPENDONLY
)) {
2607 xoap
->xoa_appendonly
=
2608 ((zp
->z_pflags
& ZFS_APPENDONLY
) != 0);
2609 XVA_SET_RTN(xvap
, XAT_APPENDONLY
);
2612 if (XVA_ISSET_REQ(xvap
, XAT_NODUMP
)) {
2614 ((zp
->z_pflags
& ZFS_NODUMP
) != 0);
2615 XVA_SET_RTN(xvap
, XAT_NODUMP
);
2618 if (XVA_ISSET_REQ(xvap
, XAT_OPAQUE
)) {
2620 ((zp
->z_pflags
& ZFS_OPAQUE
) != 0);
2621 XVA_SET_RTN(xvap
, XAT_OPAQUE
);
2624 if (XVA_ISSET_REQ(xvap
, XAT_AV_QUARANTINED
)) {
2625 xoap
->xoa_av_quarantined
=
2626 ((zp
->z_pflags
& ZFS_AV_QUARANTINED
) != 0);
2627 XVA_SET_RTN(xvap
, XAT_AV_QUARANTINED
);
2630 if (XVA_ISSET_REQ(xvap
, XAT_AV_MODIFIED
)) {
2631 xoap
->xoa_av_modified
=
2632 ((zp
->z_pflags
& ZFS_AV_MODIFIED
) != 0);
2633 XVA_SET_RTN(xvap
, XAT_AV_MODIFIED
);
2636 if (XVA_ISSET_REQ(xvap
, XAT_AV_SCANSTAMP
) &&
2637 vp
->v_type
== VREG
) {
2638 zfs_sa_get_scanstamp(zp
, xvap
);
2641 if (XVA_ISSET_REQ(xvap
, XAT_CREATETIME
)) {
2644 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_CRTIME(zfsvfs
),
2645 times
, sizeof (times
));
2646 ZFS_TIME_DECODE(&xoap
->xoa_createtime
, times
);
2647 XVA_SET_RTN(xvap
, XAT_CREATETIME
);
2650 if (XVA_ISSET_REQ(xvap
, XAT_REPARSE
)) {
2651 xoap
->xoa_reparse
= ((zp
->z_pflags
& ZFS_REPARSE
) != 0);
2652 XVA_SET_RTN(xvap
, XAT_REPARSE
);
2654 if (XVA_ISSET_REQ(xvap
, XAT_GEN
)) {
2655 xoap
->xoa_generation
= zp
->z_gen
;
2656 XVA_SET_RTN(xvap
, XAT_GEN
);
2659 if (XVA_ISSET_REQ(xvap
, XAT_OFFLINE
)) {
2661 ((zp
->z_pflags
& ZFS_OFFLINE
) != 0);
2662 XVA_SET_RTN(xvap
, XAT_OFFLINE
);
2665 if (XVA_ISSET_REQ(xvap
, XAT_SPARSE
)) {
2667 ((zp
->z_pflags
& ZFS_SPARSE
) != 0);
2668 XVA_SET_RTN(xvap
, XAT_SPARSE
);
2672 ZFS_TIME_DECODE(&vap
->va_atime
, zp
->z_atime
);
2673 ZFS_TIME_DECODE(&vap
->va_mtime
, mtime
);
2674 ZFS_TIME_DECODE(&vap
->va_ctime
, ctime
);
2676 mutex_exit(&zp
->z_lock
);
2678 sa_object_size(zp
->z_sa_hdl
, &vap
->va_blksize
, &vap
->va_nblocks
);
2680 if (zp
->z_blksz
== 0) {
2682 * Block size hasn't been set; suggest maximal I/O transfers.
2684 vap
->va_blksize
= zfsvfs
->z_max_blksz
;
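/*
 * Illustrative note (added comment, not in the original source): when a
 * file has no data blocks written yet, z_blksz is 0 and the fallback above
 * reports the dataset's maximum block size (zfsvfs->z_max_blksz, typically
 * 128K by default) as va_blksize, so stat(2) callers size their I/O
 * requests generously rather than page-by-page.
 */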
 * Set the file attributes to the values contained in the
 *	IN:	vp	- vnode of file to be modified.
 *		vap	- new attribute values.
 *			  If AT_XVATTR set, then optional attrs are being set
 *		flags	- ATTR_UTIME set if non-default time values provided.
 *			- ATTR_NOACLCHECK (CIFS context only).
 *		cr	- credentials of caller.
 *		ct	- caller context
 *	RETURN:	0 on success, error code on failure.
 *	vp - ctime updated, mtime updated if size changed.
zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	uint_t	mask = vap->va_mask;
	uint_t	saved_mask = 0;
	uint64_t new_uid, new_gid;
	uint64_t mtime[2], ctime[2];
	int	need_policy = FALSE;
	zfs_fuid_info_t *fuidp = NULL;
	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	boolean_t fuid_dirtied = B_FALSE;
	sa_bulk_attr_t bulk[7], xattr_bulk[7];
	int	count = 0, xattr_count = 0;

	if (mask & AT_NOSET)
		return (SET_ERROR(EINVAL));

	zilog = zfsvfs->z_log;

	 * Make sure that if we have ephemeral uid/gid or xvattr specified
	 * that file system is at proper version level
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
	    ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
	    (mask & AT_XVATTR))) {
		return (SET_ERROR(EINVAL));

	if (mask & AT_SIZE && vp->v_type == VDIR) {
		return (SET_ERROR(EISDIR));

	if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
		return (SET_ERROR(EINVAL));

	 * If this is an xvattr_t, then get a pointer to the structure of
	 * optional attributes.  If this is NULL, then we have a vattr_t.
	xoap = xva_getxoptattr(xvap);

	xva_init(&tmpxvattr);

	 * Immutable files can only alter immutable bit and atime
	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
	    ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
	    ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
		return (SET_ERROR(EPERM));

	 * Note: ZFS_READONLY is handled in zfs_zaccess_common.

	 * Verify timestamps don't overflow 32 bits.
	 * ZFS can handle large timestamps, but 32bit syscalls can't
	 * handle times greater than 2039.  This check should be removed
	 * once large timestamps are fully supported.
	if (mask & (AT_ATIME | AT_MTIME)) {
		if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
		    ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
			return (SET_ERROR(EOVERFLOW));

	/* Can this be moved to before the top label? */
	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
		return (SET_ERROR(EROFS));

	 * First validate permissions
	if (mask & AT_SIZE) {
		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
		 * XXX - Note, we are not providing any open
		 * mode flags here (like FNDELAY), so we may
		 * block if there are locks present... this
		 * should be addressed in openat().
		/* XXX - would it be OK to generate a log record here? */
		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);

		if (vap->va_size == 0)
			vnevent_truncate(ZTOV(zp), ct);

	if (mask & (AT_ATIME|AT_MTIME) ||
	    ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,

	if (mask & (AT_UID|AT_GID)) {
		int	idmask = (mask & (AT_UID|AT_GID));

		 * NOTE: even if a new mode is being set,
		 * we may clear S_ISUID/S_ISGID bits.
		if (!(mask & AT_MODE))
			vap->va_mode = zp->z_mode;

		 * Take ownership or chgrp to group we are a member of
		take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
		take_group = (mask & AT_GID) &&
		    zfs_groupmember(zfsvfs, vap->va_gid, cr);

		 * If both AT_UID and AT_GID are set then take_owner and
		 * take_group must both be set in order to allow taking
		 * Otherwise, send the check through secpolicy_vnode_setattr()
		if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
		    ((idmask == AT_UID) && take_owner) ||
		    ((idmask == AT_GID) && take_group)) {
			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
			    skipaclchk, cr) == 0) {
				 * Remove setuid/setgid for non-privileged users
				secpolicy_setid_clear(vap, cr);
				trim_mask = (mask & (AT_UID|AT_GID));

	mutex_enter(&zp->z_lock);
	oldva.va_mode = zp->z_mode;
	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
	if (mask & AT_XVATTR) {
		 * Update xvattr mask to include only those attributes
		 * that are actually changing.
		 * the bits will be restored prior to actually setting
		 * the attributes so the caller thinks they were set.
		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			if (xoap->xoa_appendonly !=
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
				XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			if (xoap->xoa_nounlink !=
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
				XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			if (xoap->xoa_immutable !=
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
				XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			if (xoap->xoa_nodump !=
			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
				XVA_CLR_REQ(xvap, XAT_NODUMP);
				XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			if (xoap->xoa_av_modified !=
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
				XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			if ((vp->v_type != VREG &&
			    xoap->xoa_av_quarantined) ||
			    xoap->xoa_av_quarantined !=
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
				XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			mutex_exit(&zp->z_lock);
			return (SET_ERROR(EPERM));

		if (need_policy == FALSE &&
		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {

	mutex_exit(&zp->z_lock);

	if (mask & AT_MODE) {
		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
			err = secpolicy_setid_setsticky_clear(vp, vap,
			trim_mask |= AT_MODE;

	 * If trim_mask is set then take ownership
	 * has been granted or write_acl is present and user
	 * has the ability to modify mode.  In that case remove
	 * UID|GID and or MODE from mask so that
	 * secpolicy_vnode_setattr() doesn't revoke it.
		saved_mask = vap->va_mask;
		vap->va_mask &= ~trim_mask;
	err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
	    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
		vap->va_mask |= saved_mask;

	 * secpolicy_vnode_setattr, or take ownership may have
	mask = vap->va_mask;

	if ((mask & (AT_UID | AT_GID))) {
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj, sizeof (xattr_obj));

		if (err == 0 && xattr_obj) {
			err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
		if (mask & AT_UID) {
			new_uid = zfs_fuid_create(zfsvfs,
			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
			if (new_uid != zp->z_uid &&
			    zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
					VN_RELE(ZTOV(attrzp));
				err = SET_ERROR(EDQUOT);

		if (mask & AT_GID) {
			new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
			    cr, ZFS_GROUP, &fuidp);
			if (new_gid != zp->z_gid &&
			    zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
					VN_RELE(ZTOV(attrzp));
				err = SET_ERROR(EDQUOT);
	tx = dmu_tx_create(zfsvfs->z_os);

	if (mask & AT_MODE) {
		uint64_t pmode = zp->z_mode;

		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);

		if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
		    !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
			err = SET_ERROR(EPERM);

		if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))

		mutex_enter(&zp->z_lock);
		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
			 * Are we upgrading ACL from old V0 format
			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
			    zfs_znode_acl_version(zp) ==
			    ZFS_ACL_VERSION_INITIAL) {
				dmu_tx_hold_free(tx, acl_obj, 0,
				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
				    0, aclp->z_acl_bytes);
				dmu_tx_hold_write(tx, acl_obj, 0,
		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);
		mutex_exit(&zp->z_lock);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		if ((mask & AT_XVATTR) &&
		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);

		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);

	fuid_dirtied = zfsvfs->z_fuid_dirty;
		zfs_fuid_txhold(zfsvfs, tx);

	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_WAIT);

	 * Set each attribute requested.
	 * We group settings according to the locks they need to acquire.
	 * Note: you cannot set ctime directly, although it will be
	 * updated as a side-effect of calling this function.
	if (mask & (AT_UID|AT_GID|AT_MODE))
		mutex_enter(&zp->z_acl_lock);
	mutex_enter(&zp->z_lock);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

		if (mask & (AT_UID|AT_GID|AT_MODE))
			mutex_enter(&attrzp->z_acl_lock);
		mutex_enter(&attrzp->z_lock);
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
		    sizeof (attrzp->z_pflags));

	if (mask & (AT_UID|AT_GID)) {

		if (mask & AT_UID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
			    &new_uid, sizeof (new_uid));
			zp->z_uid = new_uid;
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_UID(zfsvfs), NULL, &new_uid,
				attrzp->z_uid = new_uid;

		if (mask & AT_GID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
			    NULL, &new_gid, sizeof (new_gid));
			zp->z_gid = new_gid;
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_GID(zfsvfs), NULL, &new_gid,
				attrzp->z_gid = new_gid;

		if (!(mask & AT_MODE)) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
			    NULL, &new_mode, sizeof (new_mode));
			new_mode = zp->z_mode;
		err = zfs_acl_chown_setattr(zp);
			err = zfs_acl_chown_setattr(attrzp);

	if (mask & AT_MODE) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
		    &new_mode, sizeof (new_mode));
		zp->z_mode = new_mode;
		ASSERT3U((uintptr_t)aclp, !=, (uintptr_t)NULL);
		err = zfs_aclset_common(zp, aclp, cr, tx);
		if (zp->z_acl_cached)
			zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = aclp;

	if (mask & AT_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
		    &zp->z_atime, sizeof (zp->z_atime));

	if (mask & AT_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
		    mtime, sizeof (mtime));

	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
	if (mask & AT_SIZE && !(mask & AT_MTIME)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
		    NULL, mtime, sizeof (mtime));
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
	} else if (mask != 0) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
			    SA_ZPL_CTIME(zfsvfs), NULL,
			    &ctime, sizeof (ctime));
			zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
			    mtime, ctime, B_TRUE);

	 * Do this after setting timestamps to prevent timestamp
	 * update from toggling bit
	if (xoap && (mask & AT_XVATTR)) {

		 * restore trimmed off masks
		 * so that return masks can be set for caller.
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
			XVA_SET_REQ(xvap, XAT_APPENDONLY);
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
			XVA_SET_REQ(xvap, XAT_NOUNLINK);
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
			XVA_SET_REQ(xvap, XAT_NODUMP);
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			ASSERT(vp->v_type == VREG);

		zfs_xvattr_set(zp, xvap, tx);

		zfs_fuid_sync(zfsvfs, tx);

		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);

	mutex_exit(&zp->z_lock);
	if (mask & (AT_UID|AT_GID|AT_MODE))
		mutex_exit(&zp->z_acl_lock);

		if (mask & (AT_UID|AT_GID|AT_MODE))
			mutex_exit(&attrzp->z_acl_lock);
		mutex_exit(&attrzp->z_lock);

	if (err == 0 && attrzp) {
		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
		VN_RELE(ZTOV(attrzp));

		zfs_fuid_info_free(fuidp);

		if (err == ERESTART)
		err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
typedef struct zfs_zlock {
	krwlock_t	*zl_rwlock;	/* lock we acquired */
	znode_t		*zl_znode;	/* znode we held */
	struct zfs_zlock *zl_next;	/* next in list */

 * Drop locks and release vnodes that were held by zfs_rename_lock().
zfs_rename_unlock(zfs_zlock_t **zlpp)
	while ((zl = *zlpp) != NULL) {
		if (zl->zl_znode != NULL)
			VN_RELE(ZTOV(zl->zl_znode));
		rw_exit(zl->zl_rwlock);
		*zlpp = zl->zl_next;
		kmem_free(zl, sizeof (*zl));
 * Search back through the directory tree, using the ".." entries.
 * Lock each directory in the chain to prevent concurrent renames.
 * Fail any attempt to move a directory into one of its own descendants.
 * XXX - z_parent_lock can overlap with map or grow locks
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
	uint64_t	rootid = zp->z_zfsvfs->z_root;
	uint64_t	oidp = zp->z_id;
	krwlock_t	*rwlp = &szp->z_parent_lock;
	krw_t		rw = RW_WRITER;

	 * First pass write-locks szp and compares to zp->z_id.
	 * Later passes read-lock zp and compare to zp->z_parent.
		if (!rw_tryenter(rwlp, rw)) {
			 * Another thread is renaming in this path.
			 * Note that if we are a WRITER, we don't have any
			 * parent_locks held yet.
			if (rw == RW_READER && zp->z_id > szp->z_id) {
				 * Drop our locks and restart
				zfs_rename_unlock(&zl);
				rwlp = &szp->z_parent_lock;
				 * Wait for other thread to drop its locks

		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
		zl->zl_rwlock = rwlp;
		zl->zl_znode = NULL;
		zl->zl_next = *zlpp;

		if (oidp == szp->z_id)		/* We're a descendant of szp */
			return (SET_ERROR(EINVAL));

		if (oidp == rootid)		/* We've hit the top */

		if (rw == RW_READER) {		/* i.e. not the first pass */
			int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
		    &oidp, sizeof (oidp));
		rwlp = &zp->z_parent_lock;

	} while (zp->z_id != sdzp->z_id);
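/*
 * Illustrative example (added comment, not in the original source): for a
 * request like "rename /usr/a/b to /usr/a/b/c/d", the loop above starts at
 * the target directory and follows ".." entries upward: c -> b.  Reaching
 * the source directory b (szp) before the source's parent proves the
 * target lies inside the directory being moved, so the walk returns
 * EINVAL and the rename is rejected.
 */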
 * Move an entry from the provided source directory to the target
 * directory.  Change the entry name as indicated.
 *	IN:	sdvp	- Source directory containing the "old entry".
 *		snm	- Old entry name.
 *		tdvp	- Target directory to contain the "new entry".
 *		tnm	- New entry name.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *	RETURN:	0 on success, error code on failure.
 *	sdvp,tdvp - ctime|mtime updated
zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
    caller_context_t *ct, int flags)
	znode_t		*tdzp, *szp, *tzp;
	znode_t		*sdzp = VTOZ(sdvp);
	zfsvfs_t	*zfsvfs = sdzp->z_zfsvfs;
	zfs_dirlock_t	*sdl, *tdl;
	int		cmp, serr, terr;
	int		error = 0, rm_err = 0;
	boolean_t	waited = B_FALSE;

	ZFS_VERIFY_ZP(sdzp);
	zilog = zfsvfs->z_log;

	 * Make sure we have the real vp for the target directory.
	if (fop_realvp(tdvp, &realvp, ct) == 0)

	ZFS_VERIFY_ZP(tdzp);

	 * We check z_zfsvfs rather than v_vfsp here, because snapshots and the
	 * ctldir appear to have the same v_vfsp.
	if (tdzp->z_zfsvfs != zfsvfs || zfsctl_is_node(tdvp)) {
		return (SET_ERROR(EXDEV));

	if (zfsvfs->z_utf8 && u8_validate(tnm,
	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		return (SET_ERROR(EILSEQ));

	if (flags & FIGNORECASE)

	 * This is to prevent the creation of links into attribute space
	 * by renaming a linked file into or out of an attribute directory.
	 * See the comment in zfs_link() for why this is considered bad.
	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
		return (SET_ERROR(EINVAL));

	 * Lock source and target directory entries.  To prevent deadlock,
	 * a lock ordering must be defined.  We lock the directory with
	 * the smallest object id first, or if it's a tie, the one with
	 * the lexically first name.
	if (sdzp->z_id < tdzp->z_id) {
	} else if (sdzp->z_id > tdzp->z_id) {
		 * First compare the two name arguments without
		 * considering any case folding.
		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);

		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
		ASSERT(error == 0 || !zfsvfs->z_utf8);
			 * POSIX: "If the old argument and the new argument
			 * both refer to links to the same existing file,
			 * the rename() function shall return successfully
			 * and perform no other action."
		 * If the file system is case-folding, then we may
		 * have some more checking to do.  A case-folding file
		 * system is either supporting mixed case sensitivity
		 * access or is completely case-insensitive.  Note
		 * that the file system is always case preserving.
		 * In mixed sensitivity mode case sensitive behavior
		 * is the default.  FIGNORECASE must be used to
		 * explicitly request case insensitive behavior.
		 * If the source and target names provided differ only
		 * by case (e.g., a request to rename 'tim' to 'Tim'),
		 * we will treat this as a special case in the
		 * case-insensitive mode: as long as the source name
		 * is an exact match, we will allow this to proceed as
		 * a name-change request.
		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
		    flags & FIGNORECASE)) &&
		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
			 * case preserving rename request, require exact

	 * If the source and destination directories are the same, we should
	 * grab the z_name_lock of that directory only once.
		rw_enter(&sdzp->z_name_lock, RW_READER);

		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
		    ZEXISTS | zflg, NULL, NULL);
		terr = zfs_dirent_lock(&tdl,
		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
		terr = zfs_dirent_lock(&tdl,
		    tdzp, tnm, &tzp, zflg, NULL, NULL);
		serr = zfs_dirent_lock(&sdl,
		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,

		 * Source entry invalid or not there.
			zfs_dirent_unlock(tdl);
			rw_exit(&sdzp->z_name_lock);
		if (strcmp(snm, "..") == 0)
			serr = SET_ERROR(EINVAL);
			zfs_dirent_unlock(sdl);
			rw_exit(&sdzp->z_name_lock);
		if (strcmp(tnm, "..") == 0)
			terr = SET_ERROR(EINVAL);
	 * Must have write access at the source to remove the old entry
	 * and write access at the target to create the new entry.
	 * Note that if target and source are the same, this can be
	 * done in a single check.
	if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))

	if (ZTOV(szp)->v_type == VDIR) {
		 * Check to make sure rename is valid.
		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
		if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))

	 * Does target exist?
		 * Source and target must be the same type.
		if (ZTOV(szp)->v_type == VDIR) {
			if (ZTOV(tzp)->v_type != VDIR) {
				error = SET_ERROR(ENOTDIR);
			if (ZTOV(tzp)->v_type == VDIR) {
				error = SET_ERROR(EISDIR);
		 * POSIX dictates that when the source and target
		 * entries refer to the same file object, rename
		 * must do nothing and exit without error.
		if (szp->z_id == tzp->z_id) {

	vnevent_pre_rename_src(ZTOV(szp), sdvp, snm, ct);
		vnevent_pre_rename_dest(ZTOV(tzp), tdvp, tnm, ct);

	 * notify the target directory if it is not the same
	 * as source directory.
		vnevent_pre_rename_dest_dir(tdvp, ZTOV(szp), tnm, ct);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, tdzp);
		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, tzp);
	zfs_sa_upgrade_txholds(tx, szp);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
			zfs_rename_unlock(&zl);
		zfs_dirent_unlock(sdl);
		zfs_dirent_unlock(tdl);
			rw_exit(&sdzp->z_name_lock);
		if (error == ERESTART) {

	if (tzp)	/* Attempt to remove the existing target */
		error = rm_err = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);

		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
			szp->z_pflags |= ZFS_AV_MODIFIED;

			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);

			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
				zfs_log_rename(zilog, tx, TX_RENAME |
				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
				    sdl->dl_name, tdzp, tdl->dl_name, szp);

				 * Update path information for the target vnode
				vn_renamepath(tdvp, ZTOV(szp), tnm,
				 * At this point, we have successfully created
				 * the target name, but have failed to remove
				 * the source name.  Since the create was done
				 * with the ZRENAMING flag, there are
				 * complications; for one, the link count is
				 * wrong.  The easiest way to deal with this
				 * is to remove the newly created target, and
				 * return the original error.  This must
				 * succeed; fortunately, it is very unlikely to
				 * fail, since we just created it.
				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
				    ZRENAMING, NULL), ==, 0);

	if (tzp && rm_err == 0)
		vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);

		vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
		/* notify the target dir if it is not the same as source dir */
			vnevent_rename_dest_dir(tdvp, ct);

		zfs_rename_unlock(&zl);

	zfs_dirent_unlock(sdl);
	zfs_dirent_unlock(tdl);
		rw_exit(&sdzp->z_name_lock);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
 * Insert the indicated symbolic reference entry into the directory.
 *	IN:	dvp	- Directory to contain new symbolic link.
 *		link	- Name for new symlink entry.
 *		vap	- Attributes of new entry.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *	RETURN:	0 on success, error code on failure.
 *	dvp - ctime|mtime updated
zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr,
    caller_context_t *ct, int flags)
	znode_t		*zp, *dzp = VTOZ(dvp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	uint64_t	len = strlen(link);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	uint64_t	txtype = TX_SYMLINK;
	boolean_t	waited = B_FALSE;

	ASSERT(vap->va_type == VLNK);

	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		return (SET_ERROR(EILSEQ));
	if (flags & FIGNORECASE)

	if (len > MAXPATHLEN) {
		return (SET_ERROR(ENAMETOOLONG));

	if ((error = zfs_acl_ids_create(dzp, 0,
	    vap, cr, NULL, &acl_ids)) != 0) {

	 * Attempt to lock directory; fail if entry already exists.
	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
		zfs_acl_ids_free(&acl_ids);

	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		return (SET_ERROR(EDQUOT));

	tx = dmu_tx_create(zfsvfs->z_os);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE + len);
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
		zfs_acl_ids_free(&acl_ids);

	 * Create a new object for the symlink.
	 * for version 4 ZPL datasets the symlink will be an SA attribute
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		zfs_fuid_sync(zfsvfs, tx);

	mutex_enter(&zp->z_lock);
		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
		zfs_sa_symlink(zp, link, len, tx);
	mutex_exit(&zp->z_lock);

	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
	    &zp->z_size, sizeof (zp->z_size), tx);
	 * Insert the new object into the directory.
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	if (flags & FIGNORECASE)
	zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);

	zfs_acl_ids_free(&acl_ids);

	zfs_dirent_unlock(dl);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
 * Return, in the buffer contained in the provided uio structure,
 * the symbolic path referred to by vp.
 *	IN:	vp	- vnode of symbolic link.
 *		uio	- structure to contain the link path.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *	OUT:	uio	- structure containing the link path.
 *	RETURN:	0 on success, error code on failure.
 *	vp - atime updated
zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	mutex_enter(&zp->z_lock);
		error = sa_lookup_uio(zp->z_sa_hdl,
		    SA_ZPL_SYMLINK(zfsvfs), uio);
		error = zfs_sa_readlink(zp, uio);
	mutex_exit(&zp->z_lock);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
 * Insert a new entry into directory tdvp referencing svp.
 *	IN:	tdvp	- Directory to contain new entry.
 *		svp	- vnode of new entry.
 *		name	- name of new entry.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *	RETURN:	0 on success, error code on failure.
 *	tdvp - ctime|mtime updated
 *	 svp - ctime updated
zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
    caller_context_t *ct, int flags)
	znode_t		*dzp = VTOZ(tdvp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	boolean_t	waited = B_FALSE;

	ASSERT(tdvp->v_type == VDIR);

	zilog = zfsvfs->z_log;

	if (fop_realvp(svp, &realvp, ct) == 0)

	 * POSIX dictates that we return EPERM here.
	 * Better choices include ENOTSUP or EISDIR.
	if (svp->v_type == VDIR) {
		return (SET_ERROR(EPERM));

	 * We check z_zfsvfs rather than v_vfsp here, because snapshots and the
	 * ctldir appear to have the same v_vfsp.
	if (szp->z_zfsvfs != zfsvfs || zfsctl_is_node(svp)) {
		return (SET_ERROR(EXDEV));

	/* Prevent links to .zfs/shares files */
	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (uint64_t))) != 0) {
	if (parent == zfsvfs->z_shares_dir) {
		return (SET_ERROR(EPERM));

	if (zfsvfs->z_utf8 && u8_validate(name,
	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		return (SET_ERROR(EILSEQ));
	if (flags & FIGNORECASE)

	 * We do not support links between attributes and non-attributes
	 * because of the potential security risk of creating links
	 * into "normal" file space in order to circumvent restrictions
	 * imposed in attribute space.
	if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
		return (SET_ERROR(EINVAL));

	owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
		return (SET_ERROR(EPERM));

	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {

	 * Attempt to lock directory; fail if entry already exists.
	error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	zfs_sa_upgrade_txholds(tx, szp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {

	error = zfs_link_create(dl, szp, tx, 0);

		uint64_t txtype = TX_LINK;
		if (flags & FIGNORECASE)
		zfs_log_link(zilog, tx, txtype, dzp, szp, name);

	zfs_dirent_unlock(dl);

		vnevent_link(svp, ct);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
 * zfs_null_putapage() is used when the file system has been force
 * unmounted. It just drops the pages.
zfs_null_putapage(vnode_t *vp, page_t *pp, uoff_t *offp,
    size_t *lenp, int flags, cred_t *cr)
	pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
 * Push a page out to disk, klustering if possible.
 *	IN:	vp	- file to push page to.
 *		pp	- page to push.
 *		flags	- additional flags.
 *		cr	- credentials of caller.
 *	OUT:	offp	- start of range pushed.
 *		lenp	- len of range pushed.
 *	RETURN:	0 on success, error code on failure.
 * NOTE: callers must have locked the page to be pushed.  On
 * exit, the page (and all other pages in the kluster) must be
zfs_putapage(vnode_t *vp, page_t *pp, uoff_t *offp,
    size_t *lenp, int flags, cred_t *cr)
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	 * If our blocksize is bigger than the page size, try to kluster
	 * multiple pages so that we write a full block (thus avoiding
	 * a read-modify-write).
	if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
		klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
		koff = ISP2(klen) ? P2ALIGN(off, (uoff_t)klen) : 0;
		ASSERT(koff <= zp->z_size);
		if (koff + klen > zp->z_size)
			klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
		pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);

	ASSERT3U(btop(len), ==, btopr(len));

	 * Can't push pages past end-of-file.
	if (off >= zp->z_size) {
		/* ignore all pages */
	} else if (off + len > zp->z_size) {
		int npages = btopr(zp->z_size - off);

		page_list_break(&pp, &trunc, npages);
		/* ignore pages past end of file */
			pvn_write_done(trunc, flags);
		len = zp->z_size - off;

	if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
	    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
		err = SET_ERROR(EDQUOT);
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_write(tx, zp->z_id, off, len);

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	err = dmu_tx_assign(tx, TXG_WAIT);

	if (zp->z_blksz <= PAGESIZE) {
		caddr_t va = zfs_map_page(pp, S_READ);
		ASSERT3U(len, <=, PAGESIZE);
		dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
		zfs_unmap_page(pp, va);
		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);

		uint64_t mtime[2], ctime[2];
		sa_bulk_attr_t bulk[3];

		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);

	pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
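/*
 * Worked example of the klustering arithmetic above (added for clarity,
 * not in the original source): with an 8K page size and a 128K file block
 * size, a dirty page at offset 0x23000 yields
 * klen = P2ROUNDUP(128K, 8K) = 128K and koff = P2ALIGN(0x23000, 128K) =
 * 0x20000, so pvn_write_kluster() gathers the whole 128K block and the
 * DMU write avoids a read-modify-write of that block.
 */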
 * Copy the portion of the file indicated from pages into the file.
 * The pages are stored in a page list attached to the file's vnode.
 *	IN:	vp	- vnode of file to push page data to.
 *		off	- position in file to put data.
 *		len	- amount of data to write.
 *		flags	- flags to control the operation.
 *		cr	- credentials of caller.
 *		ct	- caller context.
 *	RETURN:	0 on success, error code on failure.
 *	vp - ctime|mtime updated
zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
    caller_context_t *ct)
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	 * There's nothing to do if no data is cached.
	if (!vn_has_cached_data(vp)) {

	 * Align this request to the file block size in case we kluster.
	 * XXX - this can result in pretty aggressive locking, which can
	 * impact simultaneous read/write access.  One option might be
	 * to break up long requests (len == 0) into block-by-block
	 * operations to get narrower locking.
	blksz = zp->z_blksz;
	io_off = P2ALIGN_TYPED(off, blksz, uoff_t);
	if (len > 0 && ISP2(blksz))
		io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);

		 * Search the entire vp list for pages >= io_off.
		rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
		error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);

	rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
	if (off > zp->z_size) {
		/* past end of file */
		zfs_range_unlock(rl);

	len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);

	for (off = io_off; io_off < off + len; io_off += io_len) {
		if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
			pp = page_lookup(&vp->v_object, io_off,
			    (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
			pp = page_lookup_nowait(&vp->v_object, io_off,
			    (flags & B_FREE) ? SE_EXCL : SE_SHARED);

		if (pp != NULL && pvn_getdirty(pp, flags)) {
			 * Found a dirty page to push
			err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);

	zfs_range_unlock(rl);
	if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zfsvfs->z_log, zp->z_id);
zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
	if (zp->z_sa_hdl == NULL) {
		 * The fs has been unmounted, or we did a
		 * suspend/resume and this file no longer exists.
		if (vn_has_cached_data(vp)) {
			(void) pvn_vplist_dirty(vp, 0, zfs_null_putapage,

		mutex_enter(&zp->z_lock);
		mutex_enter(&vp->v_lock);
		ASSERT(vp->v_count == 1);
		mutex_exit(&vp->v_lock);
		mutex_exit(&zp->z_lock);
		rw_exit(&zfsvfs->z_teardown_inactive_lock);

	 * Attempt to push any data in the page cache.  If this fails
	 * we will get kicked out later in zfs_zinactive().
	if (vn_has_cached_data(vp)) {
		(void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC,

	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
			mutex_enter(&zp->z_lock);
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
			    (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
			zp->z_atime_dirty = 0;
			mutex_exit(&zp->z_lock);

	rw_exit(&zfsvfs->z_teardown_inactive_lock);
 * Bounds-check the seek operation.
 *	IN:	vp	- vnode seeking within
 *		ooff	- old file offset
 *		noffp	- pointer to new file offset
 *		ct	- caller context
 *	RETURN:	0 on success, EINVAL if new offset invalid.
zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
    caller_context_t *ct)
	if (vp->v_type == VDIR)
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
 * Pre-filter the generic locking function to trap attempts to place
 * a mandatory lock on a memory mapped file.
zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
    flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	 * We are following the UFS semantics with respect to mapcnt
	 * here: If we see that the file is mapped already, then we will
	 * return an error, but we don't worry about races between this
	 * function and zfs_map().
	if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
		return (SET_ERROR(EAGAIN));

	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
 * If we can't find a page in the cache, we will create a new page
 * and fill it with file data.  For efficiency, we may try to fill
 * multiple pages at once (klustering) to fill up the supplied page
 * list.  Note that the pages to be filled are held with an exclusive
 * lock to prevent access by other threads while they are being filled.
zfs_fillpage(vnode_t *vp, uoff_t off, struct seg *seg,
    caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
	znode_t *zp = VTOZ(vp);
	page_t *pp, *cur_pp;
	objset_t *os = zp->z_zfsvfs->z_os;
	uoff_t io_off, total;

	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
		 * We only have a single page, don't bother klustering
		pp = page_create_va(&vp->v_object, io_off, io_len,
		    PG_EXCL | PG_WAIT, seg, addr);
		 * Try to find enough pages to fill the page list
		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
		    &io_len, off, plsz, 0);
		 * The page already exists, nothing to do here.

	 * Fill the pages in the kluster.
	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
		ASSERT3U(io_off, ==, cur_pp->p_offset);
		va = zfs_map_page(cur_pp, S_WRITE);
		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
		zfs_unmap_page(cur_pp, va);
			/* On error, toss the entire kluster */
			pvn_read_done(pp, B_ERROR);
			/* convert checksum errors into IO errors */
				err = SET_ERROR(EIO);
		cur_pp = cur_pp->p_next;

	 * Fill in the page list array from the kluster starting
	 * from the desired offset `off'.
	 * NOTE: the page list will always be null terminated.
	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
	ASSERT(pl == NULL || (*pl)->p_offset == off);
 * Return pointers to the pages for the file region [off, off + len]
 * in the pl array.  If plsz is greater than len, this function may
 * also return page pointers from after the specified region
 * (i.e. the region [off, off + plsz]).  These additional pages are
 * only returned if they are already in the cache, or were created as
 * part of a klustered read.
 *	IN:	vp	- vnode of file to get data from.
 *		off	- position in file to get data from.
 *		len	- amount of data to retrieve.
 *		plsz	- length of provided page list.
 *		seg	- segment to obtain pages for.
 *		addr	- virtual address of fault.
 *		rw	- mode of created pages.
 *		cr	- credentials of caller.
 *		ct	- caller context.
 *	OUT:	protp	- protection mode of created pages.
 *		pl	- list of pages created.
 *	RETURN:	0 on success, error code on failure.
 *	vp - atime updated
zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, cred_t *cr, caller_context_t *ct)
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	/* we do our own caching, faultahead is unnecessary */
	else if (len > plsz)

	len = P2ROUNDUP(len, PAGESIZE);
	ASSERT(plsz >= len);

	 * Loop through the requested range [off, off + len) looking
	 * for pages.  If we don't find a page, we will need to create
	 * a new page and fill it with data from the file.
		if (*pl = page_lookup(&vp->v_object, off, SE_SHARED))
		else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))

		ASSERT3U((*pl)->p_offset, ==, off);

		ASSERT3U(len, >=, PAGESIZE);
		ASSERT3U(plsz, >=, PAGESIZE);

	 * Fill out the page array with any pages already in the cache.
	    (*pl++ = page_lookup_nowait(&vp->v_object, off, SE_SHARED))) {

		 * Release any pages we have previously locked.

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
 * Request a memory map for a section of a file.  This code interacts
 * with common code and the VM system as follows:
 * - common code calls mmap(), which ends up in smmap_common()
 * - this calls fop_map(), which takes you into (say) zfs
 * - zfs_map() calls as_map(), passing segvn_create() as the callback
 * - segvn_create() creates the new segment and calls fop_addmap()
 * - zfs_addmap() updates z_mapcnt
zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	segvn_crargs_t	vn_a;

	 * Note: ZFS_READONLY is handled in zfs_zaccess_common.
	if ((prot & PROT_WRITE) && (zp->z_pflags &
	    (ZFS_IMMUTABLE | ZFS_APPENDONLY))) {
		return (SET_ERROR(EPERM));

	if ((prot & (PROT_READ | PROT_EXEC)) &&
	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
		return (SET_ERROR(EACCES));

	if (vp->v_flag & VNOMAP) {
		return (SET_ERROR(ENOSYS));

	if (off < 0 || len > MAXOFFSET_T - off) {
		return (SET_ERROR(ENXIO));

	if (vp->v_type != VREG) {
		return (SET_ERROR(ENODEV));

	 * If file is locked, disallow mapping.
	if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
		return (SET_ERROR(EAGAIN));

	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);

	vn_a.offset = (uoff_t)off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.maxprot = maxprot;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
	uint64_t pages = btopr(len);

	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
 * more accurate mtime for the associated file.  Since we don't have a way of
 * detecting when the data was actually modified, we have to resort to
 * heuristics.  If an explicit msync() is done, then we mark the mtime when the
 * last page is pushed.  The problem occurs when the msync() call is omitted,
 * which is by far the most common case:
 *	putpage() via fsflush
 * If we wait until fsflush to come along, we can have a modification time that
 * is some arbitrary point in the future.  In order to prevent this in the
 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
	uint64_t pages = btopr(len);

	ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
	atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);

	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
	    vn_has_cached_data(vp))
		(void) fop_putpage(vp, off, len, B_ASYNC, cr, ct);
 * Free or allocate space in a file.  Currently, this function only
 * supports the `F_FREESP' command.  However, this command is somewhat
 * misnamed, as its functionality includes the ability to allocate as
 * well as free space.
 *	IN:	vp	- vnode of file to free data in.
 *		cmd	- action to take (only F_FREESP supported).
 *		bfp	- section of file to free/alloc.
 *		flag	- current file open mode flags.
 *		offset	- current file offset.
 *		cr	- credentials of caller [UNUSED].
 *		ct	- caller context.
 *	RETURN:	0 on success, error code on failure.
 *	vp - ctime|mtime updated
zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
    offset_t offset, cred_t *cr, caller_context_t *ct)
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	if (cmd != F_FREESP) {
		return (SET_ERROR(EINVAL));

	 * In the case where vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots)
	 * our callers might not be able to detect properly that we are
	 * read-only, so check it explicitly here.
	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
		return (SET_ERROR(EROFS));

	if (error = convoff(vp, bfp, 0, offset)) {

	if (bfp->l_len < 0) {
		return (SET_ERROR(EINVAL));

	len = bfp->l_len; /* 0 means from off to end of file */

	error = zfs_freesp(zp, off, len, flag, TRUE);

	if (error == 0 && off == 0 && len == 0)
		vnevent_truncate(ZTOV(zp), ct);
zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	uint64_t	object = zp->z_id;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
	    &gen64, sizeof (uint64_t))) != 0) {

	gen = (uint32_t)gen64;

	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
	if (fidp->fid_len < size) {
		fidp->fid_len = size;
		return (SET_ERROR(ENOSPC));

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = size;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* Must have a non-zero generation number to distinguish from .zfs */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));

	if (size == LONG_FID_LEN) {
		uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);

		zlfid = (zfid_long_t *)fidp;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));

		/* XXX - this should be the generation number for the objset */
		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			zlfid->zf_setgen[i] = 0;
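/*
 * Layout note (added comment, not in the original source): the short FID
 * built above stores the object number and generation number in
 * little-endian byte order, e.g. object 0x1234 with gen 5 becomes
 * zf_object[] = {0x34, 0x12, 0, ...} and zf_gen[] = {0x05, 0, ...}.  The
 * long form used when zfsvfs->z_parent != zfsvfs additionally records the
 * objset id, so handles for snapshot vnodes under .zfs can be told apart
 * from handles in the parent file system.
 */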
zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
	case _PC_FILESIZEBITS:

	case _PC_XATTR_EXISTS:
		zfsvfs = zp->z_zfsvfs;
		error = zfs_dirent_lock(&dl, zp, "", &xzp,
		    ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
			zfs_dirent_unlock(dl);
			if (!zfs_dirempty(xzp))
		} else if (error == ENOENT) {
			 * If there aren't extended attributes, it's the
			 * same as having zero of them.

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
		    (vp->v_type == VREG || vp->v_type == VDIR);

	case _PC_ACCESS_FILTERING:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACE_ENABLED;

	case _PC_MIN_HOLE_SIZE:
		*valp = (ulong_t)SPA_MINBLOCKSIZE;

	case _PC_TIMESTAMP_RESOLUTION:
		/* nanosecond timestamp resolution */

	return (fs_pathconf(vp, cmd, valp, cr, ct));
zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct)
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct)
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t	*zilog = zfsvfs->z_log;

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
 * The smallest read we may consider to loan out an arcbuf.
 * This must be a power of 2.
int zcr_blksz_min = (1 << 10);	/* 1K */
 * If set to less than the file block size, allow loaning out of an
 * arcbuf for a partial block read.  This must be a power of 2.
int zcr_blksz_max = (1 << 17);	/* 128K */

zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
    caller_context_t *ct)
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int max_blksz = zfsvfs->z_max_blksz;
	uio_t *uio = &xuio->xu_uio;
	ssize_t size = uio->uio_resid;
	offset_t offset = uio->uio_loffset;
	int preamble, postamble;

	if (xuio->xu_type != UIOTYPE_ZEROCOPY)
		return (SET_ERROR(EINVAL));

		 * Loan out an arc_buf for write if write size is bigger than
		 * max_blksz, and the file's block size is also max_blksz.
		if (size < blksz || zp->z_blksz != blksz) {
			return (SET_ERROR(EINVAL));

		 * Caller requests buffers for write before knowing where the
		 * write offset might be (e.g. NFS TCP write).
		preamble = P2PHASE(offset, blksz);
			preamble = blksz - preamble;

		postamble = P2PHASE(size, blksz);

		fullblk = size / blksz;
		(void) dmu_xuio_init(xuio,
		    (preamble != 0) + fullblk + (postamble != 0));
		DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
		    int, postamble, int,
		    (preamble != 0) + fullblk + (postamble != 0));

		 * Have to fix iov base/len for partial buffers.  They
		 * currently represent full arc_buf's.
			/* data begins in the middle of the arc_buf */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			(void) dmu_xuio_add(xuio, abuf,
			    blksz - preamble, preamble);

		for (i = 0; i < fullblk; i++) {
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			(void) dmu_xuio_add(xuio, abuf, 0, blksz);

			/* data ends in the middle of the arc_buf */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			(void) dmu_xuio_add(xuio, abuf, 0, postamble);

		 * Loan out an arc_buf for read if the read size is larger than
		 * the current file block size.  Block alignment is not
		 * considered.  Partial arc_buf will be loaned out for read.
		blksz = zp->z_blksz;
		if (blksz < zcr_blksz_min)
			blksz = zcr_blksz_min;
		if (blksz > zcr_blksz_max)
			blksz = zcr_blksz_max;
		/* avoid potential complexity of dealing with it */
		if (blksz > max_blksz) {
			return (SET_ERROR(EINVAL));

		maxsize = zp->z_size - uio->uio_loffset;

		if (size < blksz || vn_has_cached_data(vp)) {
			return (SET_ERROR(EINVAL));
			return (SET_ERROR(EINVAL));

	uio->uio_extflg = UIO_XUIO;
	XUIO_XUZC_RW(xuio) = ioflag;
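/*
 * Worked example (added comment, not in the original source): for a
 * block-aligned zero-copy write of 300K with blksz = 128K, the split
 * above gives preamble = 0, fullblk = 300K / 128K = 2 and
 * postamble = P2PHASE(300K, 128K) = 44K, so dmu_xuio_init() is asked for
 * 0 + 2 + 1 = 3 arc_bufs, with the last one only partially filled.
 */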
zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
	int ioflag = XUIO_XUZC_RW(xuio);

	ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);

	i = dmu_xuio_cnt(xuio);
		abuf = dmu_xuio_arcbuf(xuio, i);
		 * if abuf == NULL, it must be a write buffer
		 * that has been returned in zfs_write().
			dmu_return_arcbuf(abuf);
		ASSERT(abuf || ioflag == UIO_WRITE);

	dmu_xuio_fini(xuio);
 * Predeclare these here so that the compiler assumes that
 * this is an "old style" function declaration that does
 * not include arguments => we won't get type mismatch errors
 * in the initializations that follow.
static int zfs_inval();
static int zfs_isdir();

	return (SET_ERROR(EINVAL));

	return (SET_ERROR(EISDIR));
 * Directory vnode operations
const struct vnodeops zfs_dvnodeops = {
	.vop_open		= zfs_open,
	.vop_close		= zfs_close,
	.vop_read		= zfs_isdir,
	.vop_write		= zfs_isdir,
	.vop_ioctl		= zfs_ioctl,
	.vop_getattr		= zfs_getattr,
	.vop_setattr		= zfs_setattr,
	.vop_access		= zfs_access,
	.vop_lookup		= zfs_lookup,
	.vop_create		= zfs_create,
	.vop_remove		= zfs_remove,
	.vop_link		= zfs_link,
	.vop_rename		= zfs_rename,
	.vop_mkdir		= zfs_mkdir,
	.vop_rmdir		= zfs_rmdir,
	.vop_readdir		= zfs_readdir,
	.vop_symlink		= zfs_symlink,
	.vop_fsync		= zfs_fsync,
	.vop_inactive		= zfs_inactive,
	.vop_seek		= zfs_seek,
	.vop_pathconf		= zfs_pathconf,
	.vop_getsecattr		= zfs_getsecattr,
	.vop_setsecattr		= zfs_setsecattr,
	.vop_vnevent		= fs_vnevent_support,

 * Regular file vnode operations
const struct vnodeops zfs_fvnodeops = {
	.vop_open		= zfs_open,
	.vop_close		= zfs_close,
	.vop_read		= zfs_read,
	.vop_write		= zfs_write,
	.vop_ioctl		= zfs_ioctl,
	.vop_getattr		= zfs_getattr,
	.vop_setattr		= zfs_setattr,
	.vop_access		= zfs_access,
	.vop_lookup		= zfs_lookup,
	.vop_rename		= zfs_rename,
	.vop_fsync		= zfs_fsync,
	.vop_inactive		= zfs_inactive,
	.vop_seek		= zfs_seek,
	.vop_frlock		= zfs_frlock,
	.vop_space		= zfs_space,
	.vop_getpage		= zfs_getpage,
	.vop_putpage		= zfs_putpage,
	.vop_addmap		= zfs_addmap,
	.vop_delmap		= zfs_delmap,
	.vop_pathconf		= zfs_pathconf,
	.vop_getsecattr		= zfs_getsecattr,
	.vop_setsecattr		= zfs_setsecattr,
	.vop_vnevent		= fs_vnevent_support,
	.vop_reqzcbuf		= zfs_reqzcbuf,
	.vop_retzcbuf		= zfs_retzcbuf,

 * Symbolic link vnode operations
const struct vnodeops zfs_symvnodeops = {
	.vop_getattr		= zfs_getattr,
	.vop_setattr		= zfs_setattr,
	.vop_access		= zfs_access,
	.vop_rename		= zfs_rename,
	.vop_readlink		= zfs_readlink,
	.vop_inactive		= zfs_inactive,
	.vop_pathconf		= zfs_pathconf,
	.vop_vnevent		= fs_vnevent_support,

 * special share hidden files vnode operations
const struct vnodeops zfs_sharevnodeops = {
	.vop_getattr		= zfs_getattr,
	.vop_access		= zfs_access,
	.vop_inactive		= zfs_inactive,
	.vop_pathconf		= zfs_pathconf,
	.vop_getsecattr		= zfs_getsecattr,
	.vop_setsecattr		= zfs_setsecattr,
	.vop_vnevent		= fs_vnevent_support,

 * Extended attribute directory vnode operations
 * These ops are identical to the directory vnode
 * operations except for restricted operations:
 * Note that there are other restrictions embedded in:
 *	zfs_create() - restrict type to VREG
 *	zfs_link() - no links into/out of attribute space
 *	zfs_rename() - no moves into/out of attribute space
const struct vnodeops zfs_xdvnodeops = {
	.vop_open		= zfs_open,
	.vop_close		= zfs_close,
	.vop_ioctl		= zfs_ioctl,
	.vop_getattr		= zfs_getattr,
	.vop_setattr		= zfs_setattr,
	.vop_access		= zfs_access,
	.vop_lookup		= zfs_lookup,
	.vop_create		= zfs_create,
	.vop_remove		= zfs_remove,
	.vop_link		= zfs_link,
	.vop_rename		= zfs_rename,
	.vop_mkdir		= zfs_inval,
	.vop_rmdir		= zfs_rmdir,
	.vop_readdir		= zfs_readdir,
	.vop_symlink		= zfs_inval,
	.vop_fsync		= zfs_fsync,
	.vop_inactive		= zfs_inactive,
	.vop_seek		= zfs_seek,
	.vop_pathconf		= zfs_pathconf,
	.vop_getsecattr		= zfs_getsecattr,
	.vop_setsecattr		= zfs_setsecattr,
	.vop_vnevent		= fs_vnevent_support,

 * Error vnode operations
const struct vnodeops zfs_evnodeops = {
	.vop_inactive		= zfs_inactive,
	.vop_pathconf		= zfs_pathconf,