/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2015, Joyent, Inc.  All rights reserved.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */
#include <sys/param.h>
#include <sys/isa_defs.h>
#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/vnode.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/nbmlock.h>
#include <sys/limits.h>
#define	COPYOUT_MAX_CACHE	(1<<17)		/* 128K */

size_t copyout_max_cached = COPYOUT_MAX_CACHE;	/* global so it's patchable */
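
/*
 * A minimal sketch (hypothetical helper, not part of this file) of how
 * copyout_max_cached is consumed by the read paths below: transfers at or
 * under the threshold keep the cached copy path, larger ones fall back to
 * the default (potentially cache-bypassing) path.
 */
#if 0
static int
rw_pick_extflg(ssize_t bcount)
{
	return (bcount <= copyout_max_cached ?
	    UIO_COPY_CACHED : UIO_COPY_DEFAULT);
}
#endif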

/*
 * read, write, pread, pwrite, readv, and writev syscalls.
 */
read(int fdes, void *cbuf, size_t count)

	int fflag, ioflag, rwflag;

	if ((cnt = (ssize_t)count) < 0)
		return (set_errno(EINVAL));
	if ((fp = getf(fdes)) == NULL)
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & FREAD) == 0) {

	if (vp->v_type == VREG && cnt == 0) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with write() calls.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_READ, fp->f_offset, cnt, svmand,

	(void) fop_rwlock(vp, rwflag, NULL);

	/*
	 * We do the following checks inside fop_rwlock so as to
	 * prevent file size from changing while these checks are
	 * being done. Also, we load fp's offset to the local
	 * variable fileoff because we can have a parallel lseek
	 * going on (f_offset is not protected by any lock) which
	 * could change f_offset. We need to see the value only
	 * once here and take a decision. Seeing it more than once
	 * can lead to incorrect functionality.
	 */
	fileoff = (uoff_t)fp->f_offset;
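	/*
	 * For example: a concurrent lseek() on the same file descriptor
	 * may store a new f_offset at any point.  If f_offset were read
	 * a second time below (say, when filling in uio_loffset), that
	 * read could observe a different offset than the one the range
	 * checks were applied to; using the single fileoff snapshot for
	 * both keeps the checks and the I/O consistent.
	 */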
	if (fileoff >= OFFSET_MAX(fp) && (vp->v_type == VREG)) {
		va.va_mask = VATTR_SIZE;
		if ((error = fop_getattr(vp, &va, 0, fp->f_cred, NULL))) {
			fop_rwunlock(vp, rwflag, NULL);
		if (fileoff >= va.va_size) {
			fop_rwunlock(vp, rwflag, NULL);
		fop_rwunlock(vp, rwflag, NULL);

	if ((vp->v_type == VREG) &&
	    (fileoff + cnt > OFFSET_MAX(fp))) {
		cnt = (ssize_t)(OFFSET_MAX(fp) - fileoff);
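	/*
	 * Worked example (assuming OFFSET_MAX(fp) is the 2GB - 1 limit of
	 * a descriptor opened without large-file support): a 200-byte read
	 * with fileoff at 2GB - 100 is clamped to cnt = 99 bytes, so the
	 * transfer stops exactly at the largest representable offset.
	 */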
	auio.uio_loffset = fileoff;
	auio.uio_iov = &aiov;
	auio.uio_resid = bcount = cnt;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = MAXOFFSET_T;
	auio.uio_fmode = fflag;
	/*
	 * Only use bypass caches when the count is large enough
	 */
	if (bcount <= copyout_max_cached)
		auio.uio_extflg = UIO_COPY_CACHED;
	else
		auio.uio_extflg = UIO_COPY_DEFAULT;

	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);

	/* If read sync is not asked for, filter sync flags */
	if ((ioflag & FRSYNC) == 0)
		ioflag &= ~(FSYNC|FDSYNC);
	error = fop_read(vp, &auio, ioflag, fp->f_cred, NULL);
	cnt -= auio.uio_resid;

	CPU_STATS_ADDQ(cp, sys, sysread, 1);
	CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)cnt);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)cnt;

	if (vp->v_type == VFIFO)	/* Backward compatibility */
		fp->f_offset = cnt;
	else if (((fp->f_flag & FAPPEND) == 0) ||
	    (vp->v_type != VREG) || (bcount != 0))	/* POSIX */
		fp->f_offset = auio.uio_loffset;
	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && cnt != 0)

	return (set_errno(error));

write(int fdes, void *cbuf, size_t count)

	register vnode_t *vp;
	int fflag, ioflag, rwflag;

	if ((cnt = (ssize_t)count) < 0)
		return (set_errno(EINVAL));
	if ((fp = getf(fdes)) == NULL)
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & FWRITE) == 0) {

	if (vp->v_type == VREG && cnt == 0) {

	aiov.iov_base = cbuf;

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_WRITE, fp->f_offset, cnt, svmand,

	(void) fop_rwlock(vp, rwflag, NULL);

	fileoff = fp->f_offset;
	if (vp->v_type == VREG) {

		/*
		 * We raise psignal if write for >0 bytes causes
		 * it to exceed the ulimit.
		 */
		if (fileoff >= curproc->p_fsz_ctl) {
			fop_rwunlock(vp, rwflag, NULL);

			mutex_enter(&curproc->p_lock);
			(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
			    curproc->p_rctls, curproc, RCA_UNSAFE_SIGINFO);
			mutex_exit(&curproc->p_lock);

		/*
		 * We return EFBIG if write is done at an offset
		 * greater than the offset maximum for this file structure.
		 */
		if (fileoff >= OFFSET_MAX(fp)) {
			fop_rwunlock(vp, rwflag, NULL);

		/*
		 * Limit the bytes to be written up to the offset maximum
		 * for this open file structure.
		 */
		if (fileoff + cnt > OFFSET_MAX(fp))
			cnt = (ssize_t)(OFFSET_MAX(fp) - fileoff);

	auio.uio_loffset = fileoff;
	auio.uio_iov = &aiov;
	auio.uio_resid = bcount = cnt;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = curproc->p_fsz_ctl;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_DEFAULT;

	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);

	error = fop_write(vp, &auio, ioflag, fp->f_cred, NULL);
	cnt -= auio.uio_resid;

	CPU_STATS_ADDQ(cp, sys, syswrite, 1);
	CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)cnt);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)cnt;

	if (vp->v_type == VFIFO)	/* Backward compatibility */
		fp->f_offset = cnt;
	else if (((fp->f_flag & FAPPEND) == 0) ||
	    (vp->v_type != VREG) || (bcount != 0))	/* POSIX */
		fp->f_offset = auio.uio_loffset;
	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && cnt != 0)

	return (set_errno(error));

pread(int fdes, void *cbuf, size_t count, off_t offset)

	register vnode_t *vp;
	int fflag, ioflag, rwflag;
	uoff_t fileoff = (uoff_t)(ulong_t)offset;
	const uoff_t maxoff = MAXOFFSET_T;

	if ((bcount = (ssize_t)count) < 0)
		return (set_errno(EINVAL));

	if ((fp = getf(fdes)) == NULL)
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & (FREAD)) == 0) {

	if (vp->v_type == VREG) {

		/*
		 * Return EINVAL if an invalid offset comes to pread.
		 * Negative offset from user will cause this error.
		 */
		if (fileoff > maxoff) {
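			/*
			 * For example, on an LP64 kernel a caller passing
			 * offset == -1 lands here: the cast above turns it
			 * into the largest uoff_t value, which is greater
			 * than maxoff, so the request is rejected with
			 * EINVAL.
			 */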
		/*
		 * Limit offset such that we don't read or write
		 * a file beyond the maximum offset representable in
		 * an off_t structure.
		 */
		if (fileoff + bcount > maxoff)
			bcount = (ssize_t)((offset_t)maxoff - fileoff);
	} else if (vp->v_type == VFIFO) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_READ, fileoff, bcount, svmand,

	aiov.iov_base = cbuf;
	aiov.iov_len = bcount;
	(void) fop_rwlock(vp, rwflag, NULL);
	if (vp->v_type == VREG && fileoff == (uoff_t)maxoff) {
		va.va_mask = VATTR_SIZE;
		if ((error = fop_getattr(vp, &va, 0, fp->f_cred, NULL))) {
			fop_rwunlock(vp, rwflag, NULL);
		fop_rwunlock(vp, rwflag, NULL);

		/*
		 * We have to return EOF if fileoff is >= file size.
		 */
		if (fileoff >= va.va_size) {

		/*
		 * File is greater than or equal to maxoff and therefore
		 * we return EOVERFLOW.
		 */

	auio.uio_loffset = fileoff;
	auio.uio_iov = &aiov;
	auio.uio_resid = bcount;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = MAXOFFSET_T;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_CACHED;

	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);

	/* If read sync is not asked for, filter sync flags */
	if ((ioflag & FRSYNC) == 0)
		ioflag &= ~(FSYNC|FDSYNC);
	error = fop_read(vp, &auio, ioflag, fp->f_cred, NULL);
	bcount -= auio.uio_resid;

	CPU_STATS_ADDQ(cp, sys, sysread, 1);
	CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)bcount);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)bcount;
	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && bcount != 0)

	return (set_errno(error));

pwrite(int fdes, void *cbuf, size_t count, off_t offset)

	register vnode_t *vp;
	int fflag, ioflag, rwflag;
	uoff_t fileoff = (uoff_t)(ulong_t)offset;
	uoff_t maxoff = MAXOFFSET_T;

	if ((bcount = (ssize_t)count) < 0)
		return (set_errno(EINVAL));
	if ((fp = getf(fdes)) == NULL)
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & (FWRITE)) == 0) {

	if (vp->v_type == VREG) {

		/*
		 * return EINVAL for offsets that cannot be
		 * represented in an off_t.
		 */
		if (fileoff > maxoff) {

		/*
		 * Take appropriate action if we are trying to write above
		 * the resource limit.
		 */
		if (fileoff >= curproc->p_fsz_ctl) {
			mutex_enter(&curproc->p_lock);
			(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
			    curproc->p_rctls, curproc, RCA_UNSAFE_SIGINFO);
			mutex_exit(&curproc->p_lock);

		/*
		 * Don't allow pwrite to cause file sizes to exceed
		 * maxoff.
		 */
		if (fileoff == maxoff) {

		if (fileoff + count > maxoff)
			bcount = (ssize_t)((uoff_t)maxoff - fileoff);
	} else if (vp->v_type == VFIFO) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_WRITE, fileoff, bcount, svmand,

	aiov.iov_base = cbuf;
	aiov.iov_len = bcount;
	(void) fop_rwlock(vp, rwflag, NULL);
	auio.uio_loffset = fileoff;
	auio.uio_iov = &aiov;
	auio.uio_resid = bcount;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = curproc->p_fsz_ctl;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_CACHED;

	/*
	 * The SUSv4 POSIX specification states:
	 * The pwrite() function shall be equivalent to write(), except
	 * that it writes into a given position and does not change
	 * the file offset (regardless of whether O_APPEND is set).
	 * To make this be true, we omit the FAPPEND flag from ioflag.
	 */
	ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);
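	/*
	 * For illustration: with a descriptor opened O_APPEND | O_DSYNC,
	 * write() above computes ioflag = FAPPEND | FDSYNC, while the line
	 * here yields ioflag = FDSYNC only, so the supplied position is
	 * honoured even for append-mode descriptors.
	 */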
	error = fop_write(vp, &auio, ioflag, fp->f_cred, NULL);
	bcount -= auio.uio_resid;

	CPU_STATS_ADDQ(cp, sys, syswrite, 1);
	CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)bcount);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)bcount;
	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && bcount != 0)

	return (set_errno(error));

readv(int fdes, struct iovec *iovp, int iovcnt)

	struct iovec buf[IOV_MAX_STACK], *aiov = buf;
	register vnode_t *vp;
	int fflag, ioflag, rwflag;
	ssize_t count, bcount;

	if (iovcnt <= 0 || iovcnt > IOV_MAX)
		return (set_errno(EINVAL));

	if (iovcnt > IOV_MAX_STACK) {
		aiovlen = iovcnt * sizeof (iovec_t);
		aiov = kmem_alloc(aiovlen, KM_SLEEP);

#ifdef _SYSCALL32_IMPL
	/*
	 * 32-bit callers need to have their iovec expanded,
	 * while ensuring that they can't move more than 2Gbytes
	 * of data in a single call.
	 */
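	/*
	 * For illustration (assuming the usual ILP32 layout, where
	 * struct iovec32 carries a 32-bit iov_base and a 32-bit iov_len):
	 * each 32-bit element is widened into a native iovec below, and
	 * the running sum of the 32-bit lengths is checked so that the
	 * aggregate transfer cannot exceed 2Gbytes.
	 */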
	if (get_udatamodel() == DATAMODEL_ILP32) {
		struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;

		aiov32len = iovcnt * sizeof (iovec32_t);
		aiov32 = kmem_alloc(aiov32len, KM_SLEEP);

		if (copyin(iovp, aiov32, aiov32len)) {
			kmem_free(aiov32, aiov32len);
			kmem_free(aiov, aiovlen);
			return (set_errno(EFAULT));

		for (i = 0; i < iovcnt; i++) {
			ssize32_t iovlen32 = aiov32[i].iov_len;
			if (iovlen32 < 0 || count32 < 0) {
				kmem_free(aiov32, aiov32len);
				kmem_free(aiov, aiovlen);
				return (set_errno(EINVAL));
			aiov[i].iov_len = iovlen32;
			aiov[i].iov_base =
			    (caddr_t)(uintptr_t)aiov32[i].iov_base;

		kmem_free(aiov32, aiov32len);

	if (copyin(iovp, aiov, iovcnt * sizeof (iovec_t))) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EFAULT));

	for (i = 0; i < iovcnt; i++) {
		ssize_t iovlen = aiov[i].iov_len;
		if (iovlen < 0 || count < 0) {
			kmem_free(aiov, aiovlen);
			return (set_errno(EINVAL));

	if ((fp = getf(fdes)) == NULL) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & FREAD) == 0) {

	if (vp->v_type == VREG && count == 0) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_READ, fp->f_offset, count, svmand,

	(void) fop_rwlock(vp, rwflag, NULL);
	fileoff = fp->f_offset;

	/*
	 * Behaviour is same as read. Please see comments in read.
	 */
	if ((vp->v_type == VREG) && (fileoff >= OFFSET_MAX(fp))) {
		va.va_mask = VATTR_SIZE;
		if ((error = fop_getattr(vp, &va, 0, fp->f_cred, NULL))) {
			fop_rwunlock(vp, rwflag, NULL);
		if (fileoff >= va.va_size) {
			fop_rwunlock(vp, rwflag, NULL);
		fop_rwunlock(vp, rwflag, NULL);

	if ((vp->v_type == VREG) && (fileoff + count > OFFSET_MAX(fp))) {
		count = (ssize_t)(OFFSET_MAX(fp) - fileoff);

	auio.uio_loffset = fileoff;
	auio.uio_iovcnt = iovcnt;
	auio.uio_resid = bcount = count;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = MAXOFFSET_T;
	auio.uio_fmode = fflag;
	if (bcount <= copyout_max_cached)
		auio.uio_extflg = UIO_COPY_CACHED;
	else
		auio.uio_extflg = UIO_COPY_DEFAULT;

	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);

	/* If read sync is not asked for, filter sync flags */
	if ((ioflag & FRSYNC) == 0)
		ioflag &= ~(FSYNC|FDSYNC);
	error = fop_read(vp, &auio, ioflag, fp->f_cred, NULL);
	count -= auio.uio_resid;

	CPU_STATS_ADDQ(cp, sys, sysread, 1);
	CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)count);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)count;

	if (vp->v_type == VFIFO)	/* Backward compatibility */
		fp->f_offset = count;
	else if (((fp->f_flag & FAPPEND) == 0) ||
	    (vp->v_type != VREG) || (bcount != 0))	/* POSIX */
		fp->f_offset = auio.uio_loffset;

	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && count != 0)

	kmem_free(aiov, aiovlen);

	return (set_errno(error));

writev(int fdes, struct iovec *iovp, int iovcnt)

	struct iovec buf[IOV_MAX_STACK], *aiov = buf;
	register vnode_t *vp;
	int fflag, ioflag, rwflag;
	ssize_t count, bcount;

	if (iovcnt <= 0 || iovcnt > IOV_MAX)
		return (set_errno(EINVAL));

	if (iovcnt > IOV_MAX_STACK) {
		aiovlen = iovcnt * sizeof (iovec_t);
		aiov = kmem_alloc(aiovlen, KM_SLEEP);

#ifdef _SYSCALL32_IMPL
	/*
	 * 32-bit callers need to have their iovec expanded,
	 * while ensuring that they can't move more than 2Gbytes
	 * of data in a single call.
	 */
	if (get_udatamodel() == DATAMODEL_ILP32) {
		struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;

		aiov32len = iovcnt * sizeof (iovec32_t);
		aiov32 = kmem_alloc(aiov32len, KM_SLEEP);

		if (copyin(iovp, aiov32, aiov32len)) {
			kmem_free(aiov32, aiov32len);
			kmem_free(aiov, aiovlen);
			return (set_errno(EFAULT));

		for (i = 0; i < iovcnt; i++) {
			ssize32_t iovlen = aiov32[i].iov_len;
			if (iovlen < 0 || count32 < 0) {
				kmem_free(aiov32, aiov32len);
				kmem_free(aiov, aiovlen);
				return (set_errno(EINVAL));
			aiov[i].iov_len = iovlen;
			aiov[i].iov_base =
			    (caddr_t)(uintptr_t)aiov32[i].iov_base;

		kmem_free(aiov32, aiov32len);

	if (copyin(iovp, aiov, iovcnt * sizeof (iovec_t))) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EFAULT));

	for (i = 0; i < iovcnt; i++) {
		ssize_t iovlen = aiov[i].iov_len;
		if (iovlen < 0 || count < 0) {
			kmem_free(aiov, aiovlen);
			return (set_errno(EINVAL));

	if ((fp = getf(fdes)) == NULL) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & FWRITE) == 0) {

	if (vp->v_type == VREG && count == 0) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_WRITE, fp->f_offset, count, svmand,

	(void) fop_rwlock(vp, rwflag, NULL);

	fileoff = fp->f_offset;

	/*
	 * Behaviour is same as write. Please see comments for write.
	 */
	if (vp->v_type == VREG) {
		if (fileoff >= curproc->p_fsz_ctl) {
			fop_rwunlock(vp, rwflag, NULL);
			mutex_enter(&curproc->p_lock);
			(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
			    curproc->p_rctls, curproc, RCA_UNSAFE_SIGINFO);
			mutex_exit(&curproc->p_lock);

		if (fileoff >= OFFSET_MAX(fp)) {
			fop_rwunlock(vp, rwflag, NULL);

		if (fileoff + count > OFFSET_MAX(fp))
			count = (ssize_t)(OFFSET_MAX(fp) - fileoff);

	auio.uio_loffset = fileoff;
	auio.uio_iovcnt = iovcnt;
	auio.uio_resid = bcount = count;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = curproc->p_fsz_ctl;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_DEFAULT;

	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);

	error = fop_write(vp, &auio, ioflag, fp->f_cred, NULL);
	count -= auio.uio_resid;

	CPU_STATS_ADDQ(cp, sys, syswrite, 1);
	CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)count);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)count;

	if (vp->v_type == VFIFO)	/* Backward compatibility */
		fp->f_offset = count;
	else if (((fp->f_flag & FAPPEND) == 0) ||
	    (vp->v_type != VREG) || (bcount != 0))	/* POSIX */
		fp->f_offset = auio.uio_loffset;
	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && count != 0)

	kmem_free(aiov, aiovlen);

	return (set_errno(error));

preadv(int fdes, struct iovec *iovp, int iovcnt, off_t offset,
    off_t extended_offset)

	struct iovec buf[IOV_MAX_STACK], *aiov = buf;
	register vnode_t *vp;
	int fflag, ioflag, rwflag;
	ssize_t count, bcount;
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
	uoff_t fileoff = ((uoff_t)extended_offset << 32) |
	    (uoff_t)offset;
#else /* _SYSCALL32_IMPL || _ILP32 */
	uoff_t fileoff = (uoff_t)(ulong_t)offset;
#endif /* _SYSCALL32_IMPL || _ILP32 */
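	/*
	 * Worked example: a 32-bit caller requesting file offset
	 * 0x0000000200000004 passes offset = 0x00000004 and
	 * extended_offset = 0x00000002; the expression above reassembles
	 * (0x2 << 32) | 0x4 into the full 64-bit offset.
	 */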
	const uoff_t maxoff = MAXOFFSET_T;

	if (iovcnt <= 0 || iovcnt > IOV_MAX)
		return (set_errno(EINVAL));

	if (iovcnt > IOV_MAX_STACK) {
		aiovlen = iovcnt * sizeof (iovec_t);
		aiov = kmem_alloc(aiovlen, KM_SLEEP);

#ifdef _SYSCALL32_IMPL
	/*
	 * 32-bit callers need to have their iovec expanded,
	 * while ensuring that they can't move more than 2Gbytes
	 * of data in a single call.
	 */
	if (get_udatamodel() == DATAMODEL_ILP32) {
		struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;

		aiov32len = iovcnt * sizeof (iovec32_t);
		aiov32 = kmem_alloc(aiov32len, KM_SLEEP);

		if (copyin(iovp, aiov32, aiov32len)) {
			kmem_free(aiov32, aiov32len);
			kmem_free(aiov, aiovlen);
			return (set_errno(EFAULT));

		for (i = 0; i < iovcnt; i++) {
			ssize32_t iovlen32 = aiov32[i].iov_len;
			count32 += iovlen32;
			if (iovlen32 < 0 || count32 < 0) {
				kmem_free(aiov32, aiov32len);
				kmem_free(aiov, aiovlen);
				return (set_errno(EINVAL));
			aiov[i].iov_len = iovlen32;
			aiov[i].iov_base =
			    (caddr_t)(uintptr_t)aiov32[i].iov_base;

		kmem_free(aiov32, aiov32len);
#endif /* _SYSCALL32_IMPL */
	if (copyin(iovp, aiov, iovcnt * sizeof (iovec_t))) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EFAULT));

	for (i = 0; i < iovcnt; i++) {
		ssize_t iovlen = aiov[i].iov_len;
		if (iovlen < 0 || count < 0) {
			kmem_free(aiov, aiovlen);
			return (set_errno(EINVAL));

	if ((bcount = (ssize_t)count) < 0) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EINVAL));
	if ((fp = getf(fdes)) == NULL) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & FREAD) == 0) {

	if (vp->v_type == VREG) {

		/*
		 * return EINVAL for offsets that cannot be
		 * represented in an off_t.
		 */
		if (fileoff > maxoff) {

		if (fileoff + bcount > maxoff)
			bcount = (ssize_t)((uoff_t)maxoff - fileoff);
	} else if (vp->v_type == VFIFO) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_WRITE, fileoff, count, svmand,

	(void) fop_rwlock(vp, rwflag, NULL);

	/*
	 * Behaviour is same as read(2). Please see comments in
	 * read(2).
	 */
	if ((vp->v_type == VREG) && (fileoff >= OFFSET_MAX(fp))) {
		va.va_mask = VATTR_SIZE;
		if ((error =
		    fop_getattr(vp, &va, 0, fp->f_cred, NULL))) {
			fop_rwunlock(vp, rwflag, NULL);
		if (fileoff >= va.va_size) {
			fop_rwunlock(vp, rwflag, NULL);
		fop_rwunlock(vp, rwflag, NULL);

	if ((vp->v_type == VREG) &&
	    (fileoff + count > OFFSET_MAX(fp))) {
		count = (ssize_t)(OFFSET_MAX(fp) - fileoff);

	auio.uio_loffset = fileoff;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_resid = bcount = count;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = MAXOFFSET_T;
	auio.uio_fmode = fflag;
	if (bcount <= copyout_max_cached)
		auio.uio_extflg = UIO_COPY_CACHED;
	else
		auio.uio_extflg = UIO_COPY_DEFAULT;

	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);
	error = fop_read(vp, &auio, ioflag, fp->f_cred, NULL);
	count -= auio.uio_resid;
	CPU_STATS_ENTER_K();
	CPU_STATS_ADDQ(cp, sys, sysread, 1);
	CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)count);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)count;

	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && count != 0)

	kmem_free(aiov, aiovlen);

	return (set_errno(error));

pwritev(int fdes, struct iovec *iovp, int iovcnt, off_t offset,
    off_t extended_offset)

	struct iovec buf[IOV_MAX_STACK], *aiov = buf;
	register vnode_t *vp;
	int fflag, ioflag, rwflag;
	ssize_t count, bcount;
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
	uoff_t fileoff = ((uoff_t)extended_offset << 32) |
	    (uoff_t)offset;
#else /* _SYSCALL32_IMPL || _ILP32 */
	uoff_t fileoff = (uoff_t)(ulong_t)offset;
#endif /* _SYSCALL32_IMPL || _ILP32 */
	const uoff_t maxoff = MAXOFFSET_T;

	if (iovcnt <= 0 || iovcnt > IOV_MAX)
		return (set_errno(EINVAL));

	if (iovcnt > IOV_MAX_STACK) {
		aiovlen = iovcnt * sizeof (iovec_t);
		aiov = kmem_alloc(aiovlen, KM_SLEEP);

#ifdef _SYSCALL32_IMPL
	/*
	 * 32-bit callers need to have their iovec expanded,
	 * while ensuring that they can't move more than 2Gbytes
	 * of data in a single call.
	 */
	if (get_udatamodel() == DATAMODEL_ILP32) {
		struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;

		aiov32len = iovcnt * sizeof (iovec32_t);
		aiov32 = kmem_alloc(aiov32len, KM_SLEEP);

		if (copyin(iovp, aiov32, aiov32len)) {
			kmem_free(aiov32, aiov32len);
			kmem_free(aiov, aiovlen);
			return (set_errno(EFAULT));

		for (i = 0; i < iovcnt; i++) {
			ssize32_t iovlen32 = aiov32[i].iov_len;
			count32 += iovlen32;
			if (iovlen32 < 0 || count32 < 0) {
				kmem_free(aiov32, aiov32len);
				kmem_free(aiov, aiovlen);
				return (set_errno(EINVAL));
			aiov[i].iov_len = iovlen32;
			aiov[i].iov_base =
			    (caddr_t)(uintptr_t)aiov32[i].iov_base;

		kmem_free(aiov32, aiov32len);
#endif /* _SYSCALL32_IMPL */
	if (copyin(iovp, aiov, iovcnt * sizeof (iovec_t))) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EFAULT));

	for (i = 0; i < iovcnt; i++) {
		ssize_t iovlen = aiov[i].iov_len;
		if (iovlen < 0 || count < 0) {
			kmem_free(aiov, aiovlen);
			return (set_errno(EINVAL));

	if ((bcount = (ssize_t)count) < 0) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EINVAL));
	if ((fp = getf(fdes)) == NULL) {
		kmem_free(aiov, aiovlen);
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & FWRITE) == 0) {

	if (vp->v_type == VREG) {

		/*
		 * return EINVAL for offsets that cannot be
		 * represented in an off_t.
		 */
		if (fileoff > maxoff) {

		/*
		 * Take appropriate action if we are trying
		 * to write above the resource limit.
		 */
		if (fileoff >= curproc->p_fsz_ctl) {
			mutex_enter(&curproc->p_lock);
			/*
			 * Return value ignored because it lists
			 * actions taken, but we are in an error case.
			 * We don't have any actions that depend on
			 * what could happen in this call, so we ignore
			 * it.
			 */
			(void) rctl_action(
			    rctlproc_legacy[RLIMIT_FSIZE],
			    curproc->p_rctls, curproc,
			    RCA_UNSAFE_SIGINFO);
			mutex_exit(&curproc->p_lock);

		/*
		 * Don't allow pwritev to cause file sizes to exceed
		 * maxoff.
		 */
		if (fileoff == maxoff) {

		if (fileoff + bcount > maxoff)
			bcount = (ssize_t)((uoff_t)maxoff - fileoff);
	} else if (vp->v_type == VFIFO) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_WRITE, fileoff, count, svmand,

	(void) fop_rwlock(vp, rwflag, NULL);

	/*
	 * Behaviour is same as write(2). Please see comments for
	 * write(2).
	 */
	if (vp->v_type == VREG) {
		if (fileoff >= curproc->p_fsz_ctl) {
			fop_rwunlock(vp, rwflag, NULL);
			mutex_enter(&curproc->p_lock);
			/* see above rctl_action comment */
			(void) rctl_action(
			    rctlproc_legacy[RLIMIT_FSIZE],
			    curproc->p_rctls,
			    curproc, RCA_UNSAFE_SIGINFO);
			mutex_exit(&curproc->p_lock);

		if (fileoff >= OFFSET_MAX(fp)) {
			fop_rwunlock(vp, rwflag, NULL);

		if (fileoff + count > OFFSET_MAX(fp))
			count = (ssize_t)(OFFSET_MAX(fp) - fileoff);

	auio.uio_loffset = fileoff;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_resid = bcount = count;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = curproc->p_fsz_ctl;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_CACHED;
	ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);
	error = fop_write(vp, &auio, ioflag, fp->f_cred, NULL);
	count -= auio.uio_resid;
	CPU_STATS_ENTER_K();
	CPU_STATS_ADDQ(cp, sys, syswrite, 1);
	CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)count);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)count;

	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && count != 0)

	kmem_free(aiov, aiovlen);

	return (set_errno(error));

#if defined(_SYSCALL32_IMPL) || defined(_ILP32)

/*
 * This syscall supplies 64-bit file offsets to 32-bit applications only.
 */
pread64(int fdes, void *cbuf, size32_t count, uint32_t offset_1,
    uint32_t offset_2)

	register vnode_t *vp;
	int fflag, ioflag, rwflag;

#if defined(_LITTLE_ENDIAN)
	fileoff = ((uoff_t)offset_2 << 32) | (uoff_t)offset_1;
#else
	fileoff = ((uoff_t)offset_1 << 32) | (uoff_t)offset_2;
#endif
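	/*
	 * Worked example: for a file offset of 0x0000000200000004, the
	 * 32-bit syscall stub on a little-endian machine is expected to
	 * pass the low word in offset_1 (0x00000004) and the high word in
	 * offset_2 (0x00000002), while a big-endian stub passes them the
	 * other way around; either expression above reassembles the same
	 * 64-bit value.
	 */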
	if ((bcount = (ssize_t)count) < 0 || bcount > INT32_MAX)
		return (set_errno(EINVAL));

	if ((fp = getf(fdes)) == NULL)
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & (FREAD)) == 0) {

	if (vp->v_type == VREG) {

		/*
		 * Same as pread. See comments in pread.
		 */
		if (fileoff > MAXOFFSET_T) {

		if (fileoff + bcount > MAXOFFSET_T)
			bcount = (ssize_t)(MAXOFFSET_T - fileoff);
	} else if (vp->v_type == VFIFO) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_READ, fileoff, bcount, svmand,

	aiov.iov_base = cbuf;
	aiov.iov_len = bcount;
	(void) fop_rwlock(vp, rwflag, NULL);
	auio.uio_loffset = fileoff;

	/*
	 * Note: File size can never be greater than MAXOFFSET_T.
	 * If we ever start supporting 128-bit files, code similar
	 * to that in pread should go here.  Here we avoid the
	 * unnecessary fop_getattr() because we know that
	 * fileoff == MAXOFFSET_T implies that it is always greater
	 * than or equal to the file size.
	 */
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = bcount;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = MAXOFFSET_T;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_CACHED;

	ioflag = auio.uio_fmode & (FAPPEND|FSYNC|FDSYNC|FRSYNC);

	/* If read sync is not asked for, filter sync flags */
	if ((ioflag & FRSYNC) == 0)
		ioflag &= ~(FSYNC|FDSYNC);
	error = fop_read(vp, &auio, ioflag, fp->f_cred, NULL);
	bcount -= auio.uio_resid;
	CPU_STATS_ENTER_K();
	CPU_STATS_ADDQ(cp, sys, sysread, 1);
	CPU_STATS_ADDQ(cp, sys, readch, (ulong_t)bcount);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)bcount;
	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && bcount != 0)

	return (set_errno(error));

/*
 * This syscall supplies 64-bit file offsets to 32-bit applications only.
 */
pwrite64(int fdes, void *cbuf, size32_t count, uint32_t offset_1,
    uint32_t offset_2)

	register vnode_t *vp;
	int fflag, ioflag, rwflag;

#if defined(_LITTLE_ENDIAN)
	fileoff = ((uoff_t)offset_2 << 32) | (uoff_t)offset_1;
#else
	fileoff = ((uoff_t)offset_1 << 32) | (uoff_t)offset_2;
#endif

	if ((bcount = (ssize_t)count) < 0 || bcount > INT32_MAX)
		return (set_errno(EINVAL));
	if ((fp = getf(fdes)) == NULL)
		return (set_errno(EBADF));
	if (((fflag = fp->f_flag) & (FWRITE)) == 0) {

	if (vp->v_type == VREG) {

		/*
		 * See comments in pwrite.
		 */
		if (fileoff > MAXOFFSET_T) {

		if (fileoff >= curproc->p_fsz_ctl) {
			mutex_enter(&curproc->p_lock);
			(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
			    curproc->p_rctls, curproc, RCA_SAFE);
			mutex_exit(&curproc->p_lock);

		if (fileoff == MAXOFFSET_T) {

		if (fileoff + bcount > MAXOFFSET_T)
			bcount = (ssize_t)((uoff_t)MAXOFFSET_T - fileoff);
	} else if (vp->v_type == VFIFO) {

	/*
	 * We have to enter the critical region before calling fop_rwlock
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		nbl_start_crit(vp, RW_READER);
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (nbl_conflict(vp, NBL_WRITE, fileoff, bcount, svmand,

	aiov.iov_base = cbuf;
	aiov.iov_len = bcount;
	(void) fop_rwlock(vp, rwflag, NULL);
	auio.uio_loffset = fileoff;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = bcount;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = curproc->p_fsz_ctl;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_CACHED;

	/*
	 * The SUSv4 POSIX specification states:
	 * The pwrite() function shall be equivalent to write(), except
	 * that it writes into a given position and does not change
	 * the file offset (regardless of whether O_APPEND is set).
	 * To make this be true, we omit the FAPPEND flag from ioflag.
	 */
	ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);

	error = fop_write(vp, &auio, ioflag, fp->f_cred, NULL);
	bcount -= auio.uio_resid;
	CPU_STATS_ENTER_K();
	CPU_STATS_ADDQ(cp, sys, syswrite, 1);
	CPU_STATS_ADDQ(cp, sys, writech, (ulong_t)bcount);

	ttolwp(curthread)->lwp_ru.ioch += (ulong_t)bcount;
	fop_rwunlock(vp, rwflag, NULL);

	if (error == EINTR && bcount != 0)

	return (set_errno(error));

#endif /* _SYSCALL32_IMPL || _ILP32 */

#ifdef _SYSCALL32_IMPL

/*
 * Tail-call elimination of xxx32() down to xxx()
 *
 * A number of xxx32 system calls take a len (or count) argument and
 * return a number in the range [0,len] or -1 on error.
 * Given an ssize32_t input len, the downcall xxx() will return
 * a 64-bit value that is -1 or in the range [0,len] which actually
 * is a proper return value for the xxx32 call. So even if the xxx32
 * calls can be considered as returning a ssize32_t, they are currently
 * declared as returning a ssize_t as this enables tail-call elimination.
 *
 * The cast of len (or count) to ssize32_t is needed to ensure we pass
 * down negative input values as such and let the downcall handle error
 * reporting. Functions covered by this comment are:
 *
 * rw.c:           read32, write32, readv32, writev32.
 * socksyscall.c:  recv32, recvfrom32, send32, sendto32.
 * readlink.c:     readlink32.
 */
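
/*
 * For example, a 32-bit caller that passes count = 0xffffffff reaches
 * read32() below with (ssize32_t)count == -1; that negative value is
 * passed down to read(), where the (cnt = (ssize_t)count) < 0 check
 * rejects it with EINVAL, so the downcall does the error reporting as
 * described above.
 */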

ssize_t
read32(int32_t fdes, caddr32_t cbuf, size32_t count)
{
	return (read(fdes,
	    (void *)(uintptr_t)cbuf, (ssize32_t)count));
}

ssize_t
write32(int32_t fdes, caddr32_t cbuf, size32_t count)
{
	return (write(fdes,
	    (void *)(uintptr_t)cbuf, (ssize32_t)count));
}

ssize_t
readv32(int32_t fdes, caddr32_t iovp, int32_t iovcnt)
{
	return (readv(fdes, (void *)(uintptr_t)iovp, iovcnt));
}

ssize_t
writev32(int32_t fdes, caddr32_t iovp, int32_t iovcnt)
{
	return (writev(fdes, (void *)(uintptr_t)iovp, iovcnt));
}

#endif /* _SYSCALL32_IMPL */