DragonFly BSD: sys/kern/uipc_syscalls.c
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
33 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
36 #include "opt_ktrace.h"
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/sysproto.h>
42 #include <sys/malloc.h>
43 #include <sys/filedesc.h>
44 #include <sys/event.h>
45 #include <sys/proc.h>
46 #include <sys/fcntl.h>
47 #include <sys/file.h>
48 #include <sys/filio.h>
49 #include <sys/kern_syscall.h>
50 #include <sys/mbuf.h>
51 #include <sys/protosw.h>
52 #include <sys/sfbuf.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/socketops.h>
56 #include <sys/uio.h>
57 #include <sys/vnode.h>
58 #include <sys/lock.h>
59 #include <sys/mount.h>
60 #ifdef KTRACE
61 #include <sys/ktrace.h>
62 #endif
63 #include <vm/vm.h>
64 #include <vm/vm_object.h>
65 #include <vm/vm_page.h>
66 #include <vm/vm_pageout.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_extern.h>
69 #include <sys/file2.h>
70 #include <sys/signalvar.h>
71 #include <sys/serialize.h>
73 #include <sys/thread2.h>
74 #include <sys/msgport2.h>
75 #include <sys/socketvar2.h>
76 #include <net/netmsg2.h>
77 #include <vm/vm_page2.h>
79 extern int use_soaccept_pred_fast;
80 extern int use_sendfile_async;
81 extern int use_soconnect_async;
84 * System call interface to the socket abstraction.
87 extern struct fileops socketops;
90 * socket_args(int domain, int type, int protocol)
92 int
93 kern_socket(int domain, int type, int protocol, int *res)
95 struct thread *td = curthread;
96 struct filedesc *fdp = td->td_proc->p_fd;
97 struct socket *so;
98 struct file *fp;
99 int fd, error;
100 u_int fflags = 0;
101 int oflags = 0;
103 KKASSERT(td->td_lwp);
105 if (type & SOCK_NONBLOCK) {
106 type &= ~SOCK_NONBLOCK;
107 fflags |= FNONBLOCK;
109 if (type & SOCK_CLOEXEC) {
110 type &= ~SOCK_CLOEXEC;
111 oflags |= O_CLOEXEC;
114 error = falloc(td->td_lwp, &fp, &fd);
115 if (error)
116 return (error);
117 error = socreate(domain, &so, type, protocol, td);
118 if (error) {
119 fsetfd(fdp, NULL, fd);
120 } else {
121 fp->f_type = DTYPE_SOCKET;
122 fp->f_flag = FREAD | FWRITE | fflags;
123 fp->f_ops = &socketops;
124 fp->f_data = so;
125 if (oflags & O_CLOEXEC)
126 fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
127 *res = fd;
128 fsetfd(fdp, fp, fd);
130 fdrop(fp);
131 return (error);
135 * MPALMOSTSAFE
138 sys_socket(struct socket_args *uap)
140 int error;
142 error = kern_socket(uap->domain, uap->type, uap->protocol,
143 &uap->sysmsg_iresult);
145 return (error);
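/*
 * Illustrative userland sketch (not part of this file): kern_socket()
 * above strips SOCK_NONBLOCK / SOCK_CLOEXEC out of the type and applies
 * them to the new descriptor as FNONBLOCK / UF_EXCLOSE, so both can be
 * requested in a single socket(2) call.  The helper name below is
 * hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_socket_flags(void)
{
	/* Non-blocking, close-on-exec TCP socket in one call. */
	int s = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
	int nonblock, cloexec;

	if (s < 0)
		return (-1);
	/* The flags land on the descriptor, not in the socket type. */
	nonblock = (fcntl(s, F_GETFL) & O_NONBLOCK) != 0;
	cloexec = (fcntl(s, F_GETFD) & FD_CLOEXEC) != 0;
	close(s);
	return (nonblock && cloexec ? 0 : -1);
}
#endif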
149 kern_bind(int s, struct sockaddr *sa)
151 struct thread *td = curthread;
152 struct file *fp;
153 int error;
155 error = holdsock(td, s, &fp);
156 if (error)
157 return (error);
158 error = sobind((struct socket *)fp->f_data, sa, td);
159 dropfp(td, s, fp);
161 return (error);
165 * bind_args(int s, caddr_t name, int namelen)
167 * MPALMOSTSAFE
170 sys_bind(struct bind_args *uap)
172 struct sockaddr *sa;
173 int error;
175 error = getsockaddr(&sa, uap->name, uap->namelen);
176 if (error)
177 return (error);
178 error = kern_bind(uap->s, sa);
179 kfree(sa, M_SONAME);
181 return (error);
185 kern_listen(int s, int backlog)
187 struct thread *td = curthread;
188 struct file *fp;
189 int error;
191 error = holdsock(td, s, &fp);
192 if (error)
193 return (error);
194 error = solisten((struct socket *)fp->f_data, backlog, td);
195 dropfp(td, s, fp);
197 return (error);
201 * listen_args(int s, int backlog)
203 * MPALMOSTSAFE
206 sys_listen(struct listen_args *uap)
208 int error;
210 error = kern_listen(uap->s, uap->backlog);
211 return (error);
215 * Returns the accepted socket as well.
217 * NOTE! The sockets sitting on so_comp/so_incomp might have 0 refs, the
218 * pool token is absolutely required to avoid a sofree() race,
219 * as well as to avoid tailq handling races.
221 static boolean_t
222 soaccept_predicate(struct netmsg_so_notify *msg)
224 struct socket *head = msg->base.nm_so;
225 struct socket *so;
227 if (head->so_error != 0) {
228 msg->base.lmsg.ms_error = head->so_error;
229 return (TRUE);
231 lwkt_getpooltoken(head);
232 if (!TAILQ_EMPTY(&head->so_comp)) {
233 /* Abuse nm_so field as copy in/copy out parameter. XXX JH */
234 so = TAILQ_FIRST(&head->so_comp);
235 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);
236 TAILQ_REMOVE(&head->so_comp, so, so_list);
237 head->so_qlen--;
238 soclrstate(so, SS_COMP);
241 * Keep a reference before clearing the so_head
242 * to avoid racing socket close in netisr.
244 soreference(so);
245 so->so_head = NULL;
247 lwkt_relpooltoken(head);
249 msg->base.lmsg.ms_error = 0;
250 msg->base.nm_so = so;
251 return (TRUE);
253 lwkt_relpooltoken(head);
254 if (head->so_state & SS_CANTRCVMORE) {
255 msg->base.lmsg.ms_error = ECONNABORTED;
256 return (TRUE);
258 if (msg->nm_fflags & FNONBLOCK) {
259 msg->base.lmsg.ms_error = EWOULDBLOCK;
260 return (TRUE);
263 return (FALSE);
267 * The second argument to kern_accept() is a handle to a struct sockaddr.
268 * This allows kern_accept() to return a pointer to an allocated struct
269 * sockaddr which must be freed later with kfree(). The caller must
270 * initialize *name to NULL.
273 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res,
274 int sockflags)
276 struct thread *td = curthread;
277 struct filedesc *fdp = td->td_proc->p_fd;
278 struct file *lfp = NULL;
279 struct file *nfp = NULL;
280 struct sockaddr *sa;
281 struct socket *head, *so;
282 struct netmsg_so_notify msg;
283 int fd;
284 u_int fflag; /* type must match fp->f_flag */
285 int error, tmp;
287 *res = -1;
288 if (name && namelen && *namelen < 0)
289 return (EINVAL);
291 error = holdsock(td, s, &lfp);
292 if (error)
293 return (error);
295 error = falloc(td->td_lwp, &nfp, &fd);
296 if (error) { /* Probably ran out of file descriptors. */
297 fdrop(lfp);
298 return (error);
300 head = (struct socket *)lfp->f_data;
301 if ((head->so_options & SO_ACCEPTCONN) == 0) {
302 error = EINVAL;
303 goto done;
306 if (fflags & O_FBLOCKING)
307 fflags |= lfp->f_flag & ~FNONBLOCK;
308 else if (fflags & O_FNONBLOCKING)
309 fflags |= lfp->f_flag | FNONBLOCK;
310 else
311 fflags = lfp->f_flag;
313 if (use_soaccept_pred_fast) {
314 boolean_t pred;
316 /* Initialize necessary parts for soaccept_predicate() */
317 netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
318 msg.nm_fflags = fflags;
320 lwkt_getpooltoken(head);
321 pred = soaccept_predicate(&msg);
322 lwkt_relpooltoken(head);
324 if (pred) {
325 error = msg.base.lmsg.ms_error;
326 if (error)
327 goto done;
328 else
329 goto accepted;
333 /* optimize for uniprocessor case later XXX JH */
334 netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
335 0, netmsg_so_notify, netmsg_so_notify_doabort);
336 msg.nm_predicate = soaccept_predicate;
337 msg.nm_fflags = fflags;
338 msg.nm_etype = NM_REVENT;
339 error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
340 if (error)
341 goto done;
343 accepted:
345 * At this point we have the connection that's ready to be accepted.
347 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
348 * to eat the ref and turn it into a descriptor.
350 so = msg.base.nm_so;
352 fflag = lfp->f_flag;
354 /* connection has been removed from the listen queue */
355 KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);
357 if (sockflags & SOCK_KERN_NOINHERIT) {
358 fflag &= ~(FASYNC | FNONBLOCK);
359 if (sockflags & SOCK_NONBLOCK)
360 fflag |= FNONBLOCK;
361 } else {
362 if (head->so_sigio != NULL)
363 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
366 nfp->f_type = DTYPE_SOCKET;
367 nfp->f_flag = fflag;
368 nfp->f_ops = &socketops;
369 nfp->f_data = so;
370 /* Sync socket async state with file flags */
371 tmp = fflag & FASYNC;
372 fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);
374 sa = NULL;
375 if (so->so_faddr != NULL) {
376 sa = so->so_faddr;
377 so->so_faddr = NULL;
379 soaccept_generic(so);
380 error = 0;
381 } else {
382 error = soaccept(so, &sa);
386 * Set the returned name and namelen as applicable. Set the returned
387 * namelen to 0 for older code which might ignore the return value
388 * from accept.
390 if (error == 0) {
391 if (sa && name && namelen) {
392 if (*namelen > sa->sa_len)
393 *namelen = sa->sa_len;
394 *name = sa;
395 } else {
396 if (sa)
397 kfree(sa, M_SONAME);
401 done:
403 * If an error occurred, clear the reserved descriptor; otherwise
404 * associate nfp with it.
406 * Note that *res is normally ignored if an error is returned but
407 * a syscall message will still have access to the result code.
409 if (error) {
410 fsetfd(fdp, NULL, fd);
411 } else {
412 if (sockflags & SOCK_CLOEXEC)
413 fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
414 *res = fd;
415 fsetfd(fdp, nfp, fd);
417 fdrop(nfp);
418 dropfp(td, s, lfp);
420 return (error);
424 * accept(int s, caddr_t name, int *anamelen)
426 * MPALMOSTSAFE
429 sys_accept(struct accept_args *uap)
431 struct sockaddr *sa = NULL;
432 int sa_len;
433 int error;
435 if (uap->name) {
436 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
437 if (error)
438 return (error);
440 error = kern_accept(uap->s, 0, &sa, &sa_len,
441 &uap->sysmsg_iresult, 0);
443 if (error == 0)
444 error = copyout(sa, uap->name, sa_len);
445 if (error == 0) {
446 error = copyout(&sa_len, uap->anamelen,
447 sizeof(*uap->anamelen));
449 if (sa)
450 kfree(sa, M_SONAME);
451 } else {
452 error = kern_accept(uap->s, 0, NULL, 0,
453 &uap->sysmsg_iresult, 0);
455 return (error);
459 * extaccept(int s, int fflags, caddr_t name, int *anamelen)
461 * MPALMOSTSAFE
464 sys_extaccept(struct extaccept_args *uap)
466 struct sockaddr *sa = NULL;
467 int sa_len;
468 int error;
469 int fflags = uap->flags & O_FMASK;
471 if (uap->name) {
472 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
473 if (error)
474 return (error);
476 error = kern_accept(uap->s, fflags, &sa, &sa_len,
477 &uap->sysmsg_iresult, 0);
479 if (error == 0)
480 error = copyout(sa, uap->name, sa_len);
481 if (error == 0) {
482 error = copyout(&sa_len, uap->anamelen,
483 sizeof(*uap->anamelen));
485 if (sa)
486 kfree(sa, M_SONAME);
487 } else {
488 error = kern_accept(uap->s, fflags, NULL, 0,
489 &uap->sysmsg_iresult, 0);
491 return (error);
495 * accept4(int s, caddr_t name, int *anamelen, int flags)
497 * MPALMOSTSAFE
500 sys_accept4(struct accept4_args *uap)
502 struct sockaddr *sa = NULL;
503 int sa_len;
504 int error;
505 int sockflags;
507 if (uap->flags & ~(SOCK_NONBLOCK | SOCK_CLOEXEC))
508 return (EINVAL);
509 sockflags = uap->flags | SOCK_KERN_NOINHERIT;
511 if (uap->name) {
512 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
513 if (error)
514 return (error);
516 error = kern_accept(uap->s, 0, &sa, &sa_len,
517 &uap->sysmsg_iresult, sockflags);
519 if (error == 0)
520 error = copyout(sa, uap->name, sa_len);
521 if (error == 0) {
522 error = copyout(&sa_len, uap->anamelen,
523 sizeof(*uap->anamelen));
525 if (sa)
526 kfree(sa, M_SONAME);
527 } else {
528 error = kern_accept(uap->s, 0, NULL, 0,
529 &uap->sysmsg_iresult, sockflags);
531 return (error);
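/*
 * Illustrative userland sketch (not part of this file): through the
 * SOCK_KERN_NOINHERIT path above, accept4(2) gives the new socket only
 * the flags the caller asked for instead of inheriting FASYNC/FNONBLOCK
 * from the listener.  The helper name is hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>

static int
example_accept4(int listen_fd)
{
	struct sockaddr_in peer;
	socklen_t peerlen = sizeof(peer);

	/*
	 * The new descriptor is non-blocking and close-on-exec regardless
	 * of the listening socket's own file flags.
	 */
	return (accept4(listen_fd, (struct sockaddr *)&peer, &peerlen,
	    SOCK_NONBLOCK | SOCK_CLOEXEC));
}
#endif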
535 * Returns TRUE if predicate satisfied.
537 static boolean_t
538 soconnected_predicate(struct netmsg_so_notify *msg)
540 struct socket *so = msg->base.nm_so;
542 /* check predicate */
543 if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
544 msg->base.lmsg.ms_error = so->so_error;
545 return (TRUE);
548 return (FALSE);
552 kern_connect(int s, int fflags, struct sockaddr *sa)
554 struct thread *td = curthread;
555 struct file *fp;
556 struct socket *so;
557 int error, interrupted = 0;
559 error = holdsock(td, s, &fp);
560 if (error)
561 return (error);
562 so = (struct socket *)fp->f_data;
564 if (fflags & O_FBLOCKING)
565 /* fflags &= ~FNONBLOCK; */;
566 else if (fflags & O_FNONBLOCKING)
567 fflags |= FNONBLOCK;
568 else
569 fflags = fp->f_flag;
571 if (so->so_state & SS_ISCONNECTING) {
572 error = EALREADY;
573 goto done;
575 error = soconnect(so, sa, td, use_soconnect_async ? FALSE : TRUE);
576 if (error)
577 goto bad;
578 if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
579 error = EINPROGRESS;
580 goto done;
582 if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
583 struct netmsg_so_notify msg;
585 netmsg_init_abortable(&msg.base, so,
586 &curthread->td_msgport,
588 netmsg_so_notify,
589 netmsg_so_notify_doabort);
590 msg.nm_predicate = soconnected_predicate;
591 msg.nm_etype = NM_REVENT;
592 error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
593 if (error == EINTR || error == ERESTART)
594 interrupted = 1;
596 if (error == 0) {
597 error = so->so_error;
598 so->so_error = 0;
600 bad:
601 if (!interrupted)
602 soclrstate(so, SS_ISCONNECTING);
603 if (error == ERESTART)
604 error = EINTR;
605 done:
606 dropfp(td, s, fp);
608 return (error);
612 * connect_args(int s, caddr_t name, int namelen)
614 * MPALMOSTSAFE
617 sys_connect(struct connect_args *uap)
619 struct sockaddr *sa;
620 int error;
622 error = getsockaddr(&sa, uap->name, uap->namelen);
623 if (error)
624 return (error);
625 error = kern_connect(uap->s, 0, sa);
626 kfree(sa, M_SONAME);
628 return (error);
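/*
 * Illustrative userland sketch (not part of this file): the EINPROGRESS
 * return in kern_connect() above is what a non-blocking connect(2) caller
 * sees; completion is then usually detected by polling for writability
 * and reading SO_ERROR.  The helper name is hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <errno.h>
#include <poll.h>

/* Returns 0 once the non-blocking connect on 's' has completed. */
static int
example_nonblocking_connect(int s, const struct sockaddr *sa, socklen_t salen)
{
	struct pollfd pfd = { .fd = s, .events = POLLOUT };
	int soerr;
	socklen_t len = sizeof(soerr);

	if (connect(s, sa, salen) == 0)
		return (0);
	if (errno != EINPROGRESS)
		return (-1);
	if (poll(&pfd, 1, -1) != 1)
		return (-1);
	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len) < 0)
		return (-1);
	return (soerr == 0 ? 0 : -1);
}
#endif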
632 * extconnect_args(int s, int fflags, caddr_t name, int namelen)
634 * MPALMOSTSAFE
637 sys_extconnect(struct extconnect_args *uap)
639 struct sockaddr *sa;
640 int error;
641 int fflags = uap->flags & O_FMASK;
643 error = getsockaddr(&sa, uap->name, uap->namelen);
644 if (error)
645 return (error);
646 error = kern_connect(uap->s, fflags, sa);
647 kfree(sa, M_SONAME);
649 return (error);
653 kern_socketpair(int domain, int type, int protocol, int *sv)
655 struct thread *td = curthread;
656 struct filedesc *fdp;
657 struct file *fp1, *fp2;
658 struct socket *so1, *so2;
659 int fd1, fd2, error;
660 u_int fflags = 0;
661 int oflags = 0;
663 if (type & SOCK_NONBLOCK) {
664 type &= ~SOCK_NONBLOCK;
665 fflags |= FNONBLOCK;
667 if (type & SOCK_CLOEXEC) {
668 type &= ~SOCK_CLOEXEC;
669 oflags |= O_CLOEXEC;
672 fdp = td->td_proc->p_fd;
673 error = socreate(domain, &so1, type, protocol, td);
674 if (error)
675 return (error);
676 error = socreate(domain, &so2, type, protocol, td);
677 if (error)
678 goto free1;
679 error = falloc(td->td_lwp, &fp1, &fd1);
680 if (error)
681 goto free2;
682 sv[0] = fd1;
683 fp1->f_data = so1;
684 error = falloc(td->td_lwp, &fp2, &fd2);
685 if (error)
686 goto free3;
687 fp2->f_data = so2;
688 sv[1] = fd2;
689 error = soconnect2(so1, so2);
690 if (error)
691 goto free4;
692 if (type == SOCK_DGRAM) {
694 * Datagram socket connection is asymmetric.
696 error = soconnect2(so2, so1);
697 if (error)
698 goto free4;
700 fp1->f_type = fp2->f_type = DTYPE_SOCKET;
701 fp1->f_flag = fp2->f_flag = FREAD|FWRITE|fflags;
702 fp1->f_ops = fp2->f_ops = &socketops;
703 if (oflags & O_CLOEXEC) {
704 fdp->fd_files[fd1].fileflags |= UF_EXCLOSE;
705 fdp->fd_files[fd2].fileflags |= UF_EXCLOSE;
707 fsetfd(fdp, fp1, fd1);
708 fsetfd(fdp, fp2, fd2);
709 fdrop(fp1);
710 fdrop(fp2);
711 return (error);
712 free4:
713 fsetfd(fdp, NULL, fd2);
714 fdrop(fp2);
715 free3:
716 fsetfd(fdp, NULL, fd1);
717 fdrop(fp1);
718 free2:
719 (void)soclose(so2, 0);
720 free1:
721 (void)soclose(so1, 0);
722 return (error);
726 * socketpair(int domain, int type, int protocol, int *rsv)
729 sys_socketpair(struct socketpair_args *uap)
731 int error, sockv[2];
733 error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
735 if (error == 0) {
736 error = copyout(sockv, uap->rsv, sizeof(sockv));
738 if (error != 0) {
739 kern_close(sockv[0]);
740 kern_close(sockv[1]);
744 return (error);
748 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
749 struct mbuf *control, int flags, size_t *res)
751 struct thread *td = curthread;
752 struct lwp *lp = td->td_lwp;
753 struct proc *p = td->td_proc;
754 struct file *fp;
755 size_t len;
756 int error;
757 struct socket *so;
758 #ifdef KTRACE
759 struct iovec *ktriov = NULL;
760 struct uio ktruio;
761 #endif
763 error = holdsock(td, s, &fp);
764 if (error)
765 return (error);
766 #ifdef KTRACE
767 if (KTRPOINT(td, KTR_GENIO)) {
768 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
770 ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
771 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
772 ktruio = *auio;
774 #endif
775 len = auio->uio_resid;
776 so = (struct socket *)fp->f_data;
777 if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
778 if (fp->f_flag & FNONBLOCK)
779 flags |= MSG_FNONBLOCKING;
781 error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
782 if (error) {
783 if (auio->uio_resid != len && (error == ERESTART ||
784 error == EINTR || error == EWOULDBLOCK))
785 error = 0;
786 if (error == EPIPE && !(flags & MSG_NOSIGNAL) &&
787 !(so->so_options & SO_NOSIGPIPE))
788 lwpsignal(p, lp, SIGPIPE);
790 #ifdef KTRACE
791 if (ktriov != NULL) {
792 if (error == 0) {
793 ktruio.uio_iov = ktriov;
794 ktruio.uio_resid = len - auio->uio_resid;
795 ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
797 kfree(ktriov, M_TEMP);
799 #endif
800 if (error == 0)
801 *res = len - auio->uio_resid;
802 dropfp(td, s, fp);
804 return (error);
808 * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
810 * MPALMOSTSAFE
813 sys_sendto(struct sendto_args *uap)
815 struct thread *td = curthread;
816 struct uio auio;
817 struct iovec aiov;
818 struct sockaddr *sa = NULL;
819 int error;
821 if (uap->to) {
822 error = getsockaddr(&sa, uap->to, uap->tolen);
823 if (error)
824 return (error);
826 aiov.iov_base = uap->buf;
827 aiov.iov_len = uap->len;
828 auio.uio_iov = &aiov;
829 auio.uio_iovcnt = 1;
830 auio.uio_offset = 0;
831 auio.uio_resid = uap->len;
832 auio.uio_segflg = UIO_USERSPACE;
833 auio.uio_rw = UIO_WRITE;
834 auio.uio_td = td;
836 error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
837 &uap->sysmsg_szresult);
839 if (sa)
840 kfree(sa, M_SONAME);
841 return (error);
845 * sendmsg_args(int s, caddr_t msg, int flags)
847 * MPALMOSTSAFE
850 sys_sendmsg(struct sendmsg_args *uap)
852 struct thread *td = curthread;
853 struct msghdr msg;
854 struct uio auio;
855 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
856 struct sockaddr *sa = NULL;
857 struct mbuf *control = NULL;
858 int error;
860 error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
861 if (error)
862 return (error);
865 * Conditionally copyin msg.msg_name.
867 if (msg.msg_name) {
868 error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
869 if (error)
870 return (error);
874 * Populate auio.
876 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
877 &auio.uio_resid);
878 if (error)
879 goto cleanup2;
880 auio.uio_iov = iov;
881 auio.uio_iovcnt = msg.msg_iovlen;
882 auio.uio_offset = 0;
883 auio.uio_segflg = UIO_USERSPACE;
884 auio.uio_rw = UIO_WRITE;
885 auio.uio_td = td;
888 * Conditionally copyin msg.msg_control.
890 if (msg.msg_control) {
891 if (msg.msg_controllen < sizeof(struct cmsghdr) ||
892 msg.msg_controllen > MLEN) {
893 error = EINVAL;
894 goto cleanup;
896 control = m_get(M_WAITOK, MT_CONTROL);
897 if (control == NULL) {
898 error = ENOBUFS;
899 goto cleanup;
901 control->m_len = msg.msg_controllen;
902 error = copyin(msg.msg_control, mtod(control, caddr_t),
903 msg.msg_controllen);
904 if (error) {
905 m_free(control);
906 goto cleanup;
910 error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
911 &uap->sysmsg_szresult);
913 cleanup:
914 iovec_free(&iov, aiov);
915 cleanup2:
916 if (sa)
917 kfree(sa, M_SONAME);
918 return (error);
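/*
 * Illustrative userland sketch (not part of this file): the msg_control
 * path above accepts a control buffer between sizeof(struct cmsghdr) and
 * MLEN bytes; a typical use is passing a descriptor over a unix-domain
 * socket with SCM_RIGHTS.  The helper name is hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
example_send_fd(int s, int fd_to_pass)
{
	char byte = 0;
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return (sendmsg(s, &msg, 0) == 1 ? 0 : -1);
}
#endif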
922 * kern_recvmsg() takes a handle to sa and control. If the handle is non-
923 * null, it returns a dynamically allocated struct sockaddr and an mbuf.
924 * Don't forget to kfree() and m_freem() these if they are returned.
927 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
928 struct mbuf **control, int *flags, size_t *res)
930 struct thread *td = curthread;
931 struct file *fp;
932 size_t len;
933 int error;
934 int lflags;
935 struct socket *so;
936 #ifdef KTRACE
937 struct iovec *ktriov = NULL;
938 struct uio ktruio;
939 #endif
941 error = holdsock(td, s, &fp);
942 if (error)
943 return (error);
944 #ifdef KTRACE
945 if (KTRPOINT(td, KTR_GENIO)) {
946 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
948 ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
949 bcopy(auio->uio_iov, ktriov, iovlen);
950 ktruio = *auio;
952 #endif
953 len = auio->uio_resid;
954 so = (struct socket *)fp->f_data;
956 if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
957 if (fp->f_flag & FNONBLOCK) {
958 if (flags) {
959 *flags |= MSG_FNONBLOCKING;
960 } else {
961 lflags = MSG_FNONBLOCKING;
962 flags = &lflags;
967 error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
968 if (error) {
969 if (auio->uio_resid != len && (error == ERESTART ||
970 error == EINTR || error == EWOULDBLOCK))
971 error = 0;
973 #ifdef KTRACE
974 if (ktriov != NULL) {
975 if (error == 0) {
976 ktruio.uio_iov = ktriov;
977 ktruio.uio_resid = len - auio->uio_resid;
978 ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
980 kfree(ktriov, M_TEMP);
982 #endif
983 if (error == 0)
984 *res = len - auio->uio_resid;
985 dropfp(td, s, fp);
987 return (error);
991 * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
992 * caddr_t from, int *fromlenaddr)
994 * MPALMOSTSAFE
997 sys_recvfrom(struct recvfrom_args *uap)
999 struct thread *td = curthread;
1000 struct uio auio;
1001 struct iovec aiov;
1002 struct sockaddr *sa = NULL;
1003 int error, fromlen;
1005 if (uap->from && uap->fromlenaddr) {
1006 error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
1007 if (error)
1008 return (error);
1009 if (fromlen < 0)
1010 return (EINVAL);
1011 } else {
1012 fromlen = 0;
1014 aiov.iov_base = uap->buf;
1015 aiov.iov_len = uap->len;
1016 auio.uio_iov = &aiov;
1017 auio.uio_iovcnt = 1;
1018 auio.uio_offset = 0;
1019 auio.uio_resid = uap->len;
1020 auio.uio_segflg = UIO_USERSPACE;
1021 auio.uio_rw = UIO_READ;
1022 auio.uio_td = td;
1024 error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
1025 &uap->flags, &uap->sysmsg_szresult);
1027 if (error == 0 && uap->from) {
1028 /* note: sa may still be NULL */
1029 if (sa) {
1030 fromlen = MIN(fromlen, sa->sa_len);
1031 error = copyout(sa, uap->from, fromlen);
1032 } else {
1033 fromlen = 0;
1035 if (error == 0) {
1036 error = copyout(&fromlen, uap->fromlenaddr,
1037 sizeof(fromlen));
1040 if (sa)
1041 kfree(sa, M_SONAME);
1043 return (error);
1047 * recvmsg_args(int s, struct msghdr *msg, int flags)
1049 * MPALMOSTSAFE
1052 sys_recvmsg(struct recvmsg_args *uap)
1054 struct thread *td = curthread;
1055 struct msghdr msg;
1056 struct uio auio;
1057 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1058 struct mbuf *m, *control = NULL;
1059 struct sockaddr *sa = NULL;
1060 caddr_t ctlbuf;
1061 socklen_t *ufromlenp, *ucontrollenp;
1062 int error, fromlen, controllen, len, flags, *uflagsp;
1065 * This copyin handles everything except the iovec.
1067 error = copyin(uap->msg, &msg, sizeof(msg));
1068 if (error)
1069 return (error);
1071 if (msg.msg_name && msg.msg_namelen < 0)
1072 return (EINVAL);
1073 if (msg.msg_control && msg.msg_controllen < 0)
1074 return (EINVAL);
1076 ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1077 msg_namelen));
1078 ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1079 msg_controllen));
1080 uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
1081 msg_flags));
1084 * Populate auio.
1086 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
1087 &auio.uio_resid);
1088 if (error)
1089 return (error);
1090 auio.uio_iov = iov;
1091 auio.uio_iovcnt = msg.msg_iovlen;
1092 auio.uio_offset = 0;
1093 auio.uio_segflg = UIO_USERSPACE;
1094 auio.uio_rw = UIO_READ;
1095 auio.uio_td = td;
1097 flags = uap->flags;
1099 error = kern_recvmsg(uap->s,
1100 (msg.msg_name ? &sa : NULL), &auio,
1101 (msg.msg_control ? &control : NULL), &flags,
1102 &uap->sysmsg_szresult);
1105 * Conditionally copyout the name and populate the namelen field.
1107 if (error == 0 && msg.msg_name) {
1108 /* note: sa may still be NULL */
1109 if (sa != NULL) {
1110 fromlen = MIN(msg.msg_namelen, sa->sa_len);
1111 error = copyout(sa, msg.msg_name, fromlen);
1112 } else {
1113 fromlen = 0;
1115 if (error == 0)
1116 error = copyout(&fromlen, ufromlenp,
1117 sizeof(*ufromlenp));
1121 * Copyout msg.msg_control and msg.msg_controllen.
1123 if (error == 0 && msg.msg_control) {
1124 len = msg.msg_controllen;
1125 m = control;
1126 ctlbuf = (caddr_t)msg.msg_control;
1128 while(m && len > 0) {
1129 unsigned int tocopy;
1131 if (len >= m->m_len) {
1132 tocopy = m->m_len;
1133 } else {
1134 msg.msg_flags |= MSG_CTRUNC;
1135 tocopy = len;
1138 error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1139 if (error)
1140 goto cleanup;
1142 ctlbuf += tocopy;
1143 len -= tocopy;
1144 m = m->m_next;
1146 controllen = ctlbuf - (caddr_t)msg.msg_control;
1147 error = copyout(&controllen, ucontrollenp,
1148 sizeof(*ucontrollenp));
1151 if (error == 0)
1152 error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1154 cleanup:
1155 if (sa)
1156 kfree(sa, M_SONAME);
1157 iovec_free(&iov, aiov);
1158 if (control)
1159 m_freem(control);
1160 return (error);
1164 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1165 * in kernel pointer instead of a userland pointer. This allows us
1166 * to manipulate socket options in the emulation code.
1169 kern_setsockopt(int s, struct sockopt *sopt)
1171 struct thread *td = curthread;
1172 struct file *fp;
1173 int error;
1175 if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1176 return (EFAULT);
1177 if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1178 return (EINVAL);
1179 if (sopt->sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1180 return (EINVAL);
1182 error = holdsock(td, s, &fp);
1183 if (error)
1184 return (error);
1186 error = sosetopt((struct socket *)fp->f_data, sopt);
1187 dropfp(td, s, fp);
1189 return (error);
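/*
 * Minimal in-kernel sketch (not part of this file): with sopt_td left
 * NULL, sopt_val is treated as a kernel pointer, which is the emulation
 * use the comment above describes.  Field usage mirrors sys_setsockopt()
 * below; the helper name is hypothetical.
 */
#if 0
static int
example_setsockopt_kernel(int s)
{
	struct sockopt sopt;
	int on = 1;

	bzero(&sopt, sizeof(sopt));
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_REUSEADDR;
	sopt.sopt_val = &on;		/* kernel address, no copyin */
	sopt.sopt_valsize = sizeof(on);
	sopt.sopt_td = NULL;		/* marks sopt_val as in-kernel */

	return (kern_setsockopt(s, &sopt));
}
#endif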
1193 * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1195 * MPALMOSTSAFE
1198 sys_setsockopt(struct setsockopt_args *uap)
1200 struct thread *td = curthread;
1201 struct sockopt sopt;
1202 int error;
1204 sopt.sopt_level = uap->level;
1205 sopt.sopt_name = uap->name;
1206 sopt.sopt_valsize = uap->valsize;
1207 sopt.sopt_td = td;
1208 sopt.sopt_val = NULL;
1210 if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1211 return (EINVAL);
1212 if (uap->val) {
1213 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1214 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1215 if (error)
1216 goto out;
1219 error = kern_setsockopt(uap->s, &sopt);
1220 out:
1221 if (uap->val)
1222 kfree(sopt.sopt_val, M_TEMP);
1223 return(error);
1227 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1228 * in kernel pointer instead of a userland pointer. This allows us
1229 * to manipulate socket options in the emulation code.
1232 kern_getsockopt(int s, struct sockopt *sopt)
1234 struct thread *td = curthread;
1235 struct file *fp;
1236 int error;
1238 if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1239 return (EFAULT);
1240 if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1241 return (EINVAL);
1243 error = holdsock(td, s, &fp);
1244 if (error)
1245 return (error);
1247 error = sogetopt((struct socket *)fp->f_data, sopt);
1248 dropfp(td, s, fp);
1250 return (error);
1254 * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1256 * MPALMOSTSAFE
1259 sys_getsockopt(struct getsockopt_args *uap)
1261 struct thread *td = curthread;
1262 struct sockopt sopt;
1263 int error, valsize, valszmax, mflag = 0;
1265 if (uap->val) {
1266 error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1267 if (error)
1268 return (error);
1269 } else {
1270 valsize = 0;
1273 sopt.sopt_level = uap->level;
1274 sopt.sopt_name = uap->name;
1275 sopt.sopt_valsize = valsize;
1276 sopt.sopt_td = td;
1277 sopt.sopt_val = NULL;
1279 if (td->td_proc->p_ucred->cr_uid == 0) {
1280 valszmax = SOMAXOPT_SIZE0;
1281 mflag = M_NULLOK;
1282 } else {
1283 valszmax = SOMAXOPT_SIZE;
1285 if (sopt.sopt_valsize > valszmax) /* unsigned */
1286 return (EINVAL);
1287 if (uap->val) {
1288 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP,
1289 M_WAITOK | mflag);
1290 if (sopt.sopt_val == NULL)
1291 return (ENOBUFS);
1292 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1293 if (error)
1294 goto out;
1297 error = kern_getsockopt(uap->s, &sopt);
1298 if (error)
1299 goto out;
1300 valsize = sopt.sopt_valsize;
1301 error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1302 if (error)
1303 goto out;
1304 if (uap->val)
1305 error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1306 out:
1307 if (uap->val)
1308 kfree(sopt.sopt_val, M_TEMP);
1309 return (error);
1313 * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1314 * This allows kern_getsockname() to return a pointer to an allocated struct
1315 * sockaddr which must be freed later with kfree(). The caller must
1316 * initialize *name to NULL.
1319 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1321 struct thread *td = curthread;
1322 struct file *fp;
1323 struct socket *so;
1324 struct sockaddr *sa = NULL;
1325 int error;
1327 error = holdsock(td, s, &fp);
1328 if (error)
1329 return (error);
1330 if (*namelen < 0) {
1331 fdrop(fp);
1332 return (EINVAL);
1334 so = (struct socket *)fp->f_data;
1335 error = so_pru_sockaddr(so, &sa);
1336 if (error == 0) {
1337 if (sa == NULL) {
1338 *namelen = 0;
1339 } else {
1340 *namelen = MIN(*namelen, sa->sa_len);
1341 *name = sa;
1344 dropfp(td, s, fp);
1346 return (error);
1350 * getsockname_args(int fdes, caddr_t asa, int *alen)
1352 * Get socket name.
1354 * MPALMOSTSAFE
1357 sys_getsockname(struct getsockname_args *uap)
1359 struct sockaddr *sa = NULL;
1360 struct sockaddr satmp;
1361 int error, sa_len_in, sa_len_out;
1363 error = copyin(uap->alen, &sa_len_in, sizeof(sa_len_in));
1364 if (error)
1365 return (error);
1367 sa_len_out = sa_len_in;
1368 error = kern_getsockname(uap->fdes, &sa, &sa_len_out);
1370 if (error == 0) {
1371 if (sa) {
1372 error = copyout(sa, uap->asa, sa_len_out);
1373 } else {
1375 * unnamed uipc sockets don't bother storing a
1376 * sockaddr; simulate an AF_LOCAL sockaddr.
1378 sa_len_out = sizeof(satmp);
1379 if (sa_len_out > sa_len_in)
1380 sa_len_out = sa_len_in;
1381 if (sa_len_out < 0)
1382 sa_len_out = 0;
1383 bzero(&satmp, sizeof(satmp));
1384 satmp.sa_len = sa_len_out;
1385 satmp.sa_family = AF_LOCAL;
1386 error = copyout(&satmp, uap->asa, sa_len_out);
1389 if (error == 0 && sa_len_out != sa_len_in)
1390 error = copyout(&sa_len_out, uap->alen, sizeof(*uap->alen));
1391 if (sa)
1392 kfree(sa, M_SONAME);
1393 return (error);
1397 * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1398 * This allows kern_getpeername() to return a pointer to an allocated struct
1399 * sockaddr which must be freed later with kfree(). The caller must
1400 * initialize *name to NULL.
1403 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1405 struct thread *td = curthread;
1406 struct file *fp;
1407 struct socket *so;
1408 struct sockaddr *sa = NULL;
1409 int error;
1411 error = holdsock(td, s, &fp);
1412 if (error)
1413 return (error);
1414 if (*namelen < 0) {
1415 fdrop(fp);
1416 return (EINVAL);
1418 so = (struct socket *)fp->f_data;
1419 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1420 fdrop(fp);
1421 return (ENOTCONN);
1423 error = so_pru_peeraddr(so, &sa);
1424 if (error == 0) {
1425 if (sa == NULL) {
1426 *namelen = 0;
1427 } else {
1428 *namelen = MIN(*namelen, sa->sa_len);
1429 *name = sa;
1432 dropfp(td, s, fp);
1434 return (error);
1438 * getpeername_args(int fdes, caddr_t asa, int *alen)
1440 * Get name of peer for connected socket.
1442 * MPALMOSTSAFE
1445 sys_getpeername(struct getpeername_args *uap)
1447 struct sockaddr *sa = NULL;
1448 int error, sa_len;
1450 error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1451 if (error)
1452 return (error);
1454 error = kern_getpeername(uap->fdes, &sa, &sa_len);
1456 if (error == 0)
1457 error = copyout(sa, uap->asa, sa_len);
1458 if (error == 0)
1459 error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1460 if (sa)
1461 kfree(sa, M_SONAME);
1462 return (error);
1466 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1468 struct sockaddr *sa;
1469 int error;
1471 *namp = NULL;
1472 if (len > SOCK_MAXADDRLEN)
1473 return ENAMETOOLONG;
1474 if (len < offsetof(struct sockaddr, sa_data[0]))
1475 return EDOM;
1476 sa = kmalloc(len, M_SONAME, M_WAITOK);
1477 error = copyin(uaddr, sa, len);
1478 if (error) {
1479 kfree(sa, M_SONAME);
1480 } else {
1481 #if BYTE_ORDER != BIG_ENDIAN
1483 * The bind(), connect(), and sendto() syscalls were not
1484 * versioned for COMPAT_43. Thus, this check must stay.
1486 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1487 sa->sa_family = sa->sa_len;
1488 #endif
1489 sa->sa_len = len;
1490 *namp = sa;
1492 return error;
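/*
 * Illustrative sketch (not part of this file): the COMPAT_43 fixup above
 * exists because the 4.3BSD sockaddr had a 16-bit sa_family and no
 * sa_len.  On a little-endian host an old-format AF_INET (2) address
 * therefore arrives with sa_len == 2 and sa_family == 0, which the check
 * converts back into a valid family.
 */
#if 0
/* Historical 4.3BSD layout (available in <sys/socket.h> as osockaddr): */
struct osockaddr {
	unsigned short	sa_family;	/* 16 bits, no length byte */
	char		sa_data[14];
};
/* The current layout splits those 16 bits into sa_len and sa_family. */
#endif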
1496 * Detach a mapped page and release resources back to the system.
1497 * We must release our wiring and if the object is ripped out
1498 * from under the vm_page we become responsible for freeing the
1499 * page.
1501 * MPSAFE
1503 static void
1504 sf_buf_mfree(void *arg)
1506 struct sf_buf *sf = arg;
1507 vm_page_t m;
1509 m = sf_buf_page(sf);
1510 if (sf_buf_free(sf)) {
1511 /* sf invalid now */
1512 vm_page_sbusy_drop(m);
1513 #if 0
1514 if (m->object == NULL &&
1515 m->wire_count == 0 &&
1516 (m->flags & PG_NEED_COMMIT) == 0) {
1517 vm_page_free(m);
1518 } else {
1519 vm_page_wakeup(m);
1521 #endif
1526 * sendfile(2).
1527 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1528 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1530 * Send a file specified by 'fd' and starting at 'offset' to a socket
1531 * specified by 's'. Send only 'nbytes' of the file or until EOF if
1532 * nbytes == 0. Optionally add a header and/or trailer to the socket
1533 * output. If specified, write the total number of bytes sent into *sbytes.
1535 * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1536 * the headers to count against the remaining bytes to be sent from
1537 * the file descriptor. We may wish to implement a compatibility syscall
1538 * in the future.
1540 * MPALMOSTSAFE
1543 sys_sendfile(struct sendfile_args *uap)
1545 struct thread *td = curthread;
1546 struct file *fp;
1547 struct vnode *vp = NULL;
1548 struct sf_hdtr hdtr;
1549 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1550 struct uio auio;
1551 struct mbuf *mheader = NULL;
1552 size_t hbytes = 0;
1553 size_t tbytes;
1554 off_t hdtr_size = 0;
1555 off_t sbytes;
1556 int error;
1559 * Do argument checking. Must be a regular file in, stream
1560 * type and connected socket out, positive offset.
1562 fp = holdfp(td, uap->fd, FREAD);
1563 if (fp == NULL) {
1564 return (EBADF);
1566 if (fp->f_type != DTYPE_VNODE) {
1567 fdrop(fp);
1568 return (EINVAL);
1570 vp = (struct vnode *)fp->f_data;
1571 vref(vp);
1572 dropfp(td, uap->fd, fp);
1575 * If specified, get the pointer to the sf_hdtr struct for
1576 * any headers/trailers.
1578 if (uap->hdtr) {
1579 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1580 if (error)
1581 goto done;
1583 * Send any headers.
1585 if (hdtr.headers) {
1586 error = iovec_copyin(hdtr.headers, &iov, aiov,
1587 hdtr.hdr_cnt, &hbytes);
1588 if (error)
1589 goto done;
1590 auio.uio_iov = iov;
1591 auio.uio_iovcnt = hdtr.hdr_cnt;
1592 auio.uio_offset = 0;
1593 auio.uio_segflg = UIO_USERSPACE;
1594 auio.uio_rw = UIO_WRITE;
1595 auio.uio_td = td;
1596 auio.uio_resid = hbytes;
1598 mheader = m_uiomove(&auio);
1600 iovec_free(&iov, aiov);
1601 if (mheader == NULL)
1602 goto done;
1606 error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1607 &sbytes, uap->flags);
1608 if (error)
1609 goto done;
1612 * Send trailers. Wimp out and use writev(2).
1614 if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1615 error = iovec_copyin(hdtr.trailers, &iov, aiov,
1616 hdtr.trl_cnt, &auio.uio_resid);
1617 if (error)
1618 goto done;
1619 auio.uio_iov = iov;
1620 auio.uio_iovcnt = hdtr.trl_cnt;
1621 auio.uio_offset = 0;
1622 auio.uio_segflg = UIO_USERSPACE;
1623 auio.uio_rw = UIO_WRITE;
1624 auio.uio_td = td;
1626 tbytes = 0; /* avoid gcc warnings */
1627 error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1629 iovec_free(&iov, aiov);
1630 if (error)
1631 goto done;
1632 hdtr_size += tbytes; /* trailer bytes successfully sent */
1635 done:
1636 if (vp)
1637 vrele(vp);
1638 if (uap->sbytes != NULL) {
1639 sbytes += hdtr_size;
1640 copyout(&sbytes, uap->sbytes, sizeof(off_t));
1642 return (error);
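/*
 * Illustrative userland sketch (not part of this file): typical
 * sendfile(2) usage matching the argument description above.  Passing
 * nbytes == 0 sends until EOF; the sf_hdtr header/trailer vectors are
 * optional.  The helper name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send regular file 'fd', preceded by 'hdr', down stream socket 's'. */
static int
example_sendfile(int fd, int s, char *hdr, size_t hdrlen)
{
	struct iovec hdr_iov = { .iov_base = hdr, .iov_len = hdrlen };
	struct sf_hdtr hdtr = { .headers = &hdr_iov, .hdr_cnt = 1,
				.trailers = NULL, .trl_cnt = 0 };
	off_t sbytes = 0;

	if (sendfile(fd, s, 0, 0, &hdtr, &sbytes, 0) < 0)
		return (-1);
	return (0);
}
#endif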
1646 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1647 struct mbuf *mheader, off_t *sbytes, int flags)
1649 struct thread *td = curthread;
1650 struct vm_object *obj;
1651 struct socket *so;
1652 struct file *fp;
1653 struct mbuf *m, *mp;
1654 struct sf_buf *sf;
1655 struct vm_page *pg;
1656 off_t off, xfsize, xbytes;
1657 off_t hbytes = 0;
1658 int error = 0;
1660 if (vp->v_type != VREG) {
1661 error = EINVAL;
1662 goto done0;
1664 if ((obj = vp->v_object) == NULL) {
1665 error = EINVAL;
1666 goto done0;
1668 error = holdsock(td, sfd, &fp);
1669 if (error)
1670 goto done0;
1671 so = (struct socket *)fp->f_data;
1672 if (so->so_type != SOCK_STREAM) {
1673 error = EINVAL;
1674 goto done1;
1676 if ((so->so_state & SS_ISCONNECTED) == 0) {
1677 error = ENOTCONN;
1678 goto done1;
1680 if (offset < 0) {
1681 error = EINVAL;
1682 goto done1;
1686 * preallocation is required for asynchronous passing of mbufs,
1687 * otherwise we can wind up building up an infinite number of
1688 * mbufs during the asynchronous latency.
1690 if ((so->so_snd.ssb_flags & (SSB_PREALLOC | SSB_STOPSUPP)) == 0) {
1691 error = EINVAL;
1692 goto done1;
1695 *sbytes = 0;
1696 xbytes = 0;
1699 * Protect against multiple writers to the socket.
1700 * We also need at least a shared lock on the VM object.
1702 ssb_lock(&so->so_snd, M_WAITOK);
1703 vm_object_hold_shared(obj);
1706 * Loop through the pages in the file, starting with the requested
1707 * offset. Get a file page (do I/O if necessary), map the file page
1708 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1709 * it on the socket.
1711 for (off = offset; ;
1712 off += xfsize, *sbytes += xfsize + hbytes, xbytes += xfsize) {
1713 vm_pindex_t pindex;
1714 vm_offset_t pgoff;
1715 long space;
1716 int loops;
1718 pindex = OFF_TO_IDX(off);
1719 loops = 0;
1721 retry_lookup:
1723 * Calculate the amount to transfer. Not to exceed a page,
1724 * the EOF, or the passed in nbytes.
1726 xfsize = vp->v_filesize - off;
1727 if (xfsize > PAGE_SIZE)
1728 xfsize = PAGE_SIZE;
1729 pgoff = (vm_offset_t)(off & PAGE_MASK);
1730 if (PAGE_SIZE - pgoff < xfsize)
1731 xfsize = PAGE_SIZE - pgoff;
1732 if (nbytes && xfsize > (nbytes - xbytes))
1733 xfsize = nbytes - xbytes;
1734 if (xfsize <= 0)
1735 break;
1737 * Optimize the non-blocking case by looking at the socket space
1738 * before going to the extra work of constituting the sf_buf.
1740 if (so->so_snd.ssb_flags & SSB_PREALLOC)
1741 space = ssb_space_prealloc(&so->so_snd);
1742 else
1743 space = ssb_space(&so->so_snd);
1745 if ((fp->f_flag & FNONBLOCK) && space <= 0) {
1746 if (so->so_state & SS_CANTSENDMORE)
1747 error = EPIPE;
1748 else
1749 error = EAGAIN;
1750 goto done;
1754 * Attempt to look up the page.
1756 * Try to find the data using a shared vm_object token and
1757 * vm_page_lookup_sbusy_try() first.
1759 * If data is missing, use a UIO_NOCOPY VOP_READ to load
1760 * the missing data and loop back up. We avoid all sorts
1761 * of problems by not trying to hold onto the page during
1762 * the I/O.
1764 * NOTE: The soft-busy will temporarily block filesystem
1765 * truncation operations when a file is removed
1766 * while the sendfile is running.
1768 pg = vm_page_lookup_sbusy_try(obj, pindex, pgoff, xfsize);
1769 if (pg == NULL) {
1770 struct uio auio;
1771 struct iovec aiov;
1772 int bsize;
1774 if (++loops > 100000) {
1775 kprintf("sendfile: VOP operation failed "
1776 "to retain page\n");
1777 error = EIO;
1778 goto done;
1781 vm_object_drop(obj);
1782 bsize = vp->v_mount->mnt_stat.f_iosize;
1783 auio.uio_iov = &aiov;
1784 auio.uio_iovcnt = 1;
1785 aiov.iov_base = 0;
1786 aiov.iov_len = MAXBSIZE;
1787 auio.uio_resid = MAXBSIZE;
1788 auio.uio_offset = trunc_page(off);
1789 auio.uio_segflg = UIO_NOCOPY;
1790 auio.uio_rw = UIO_READ;
1791 auio.uio_td = td;
1793 vn_lock(vp, LK_SHARED | LK_RETRY);
1794 error = VOP_READ(vp, &auio,
1795 IO_VMIO | ((MAXBSIZE / bsize) << 16),
1796 td->td_ucred);
1797 vn_unlock(vp);
1798 vm_object_hold_shared(obj);
1800 if (error)
1801 goto done;
1802 goto retry_lookup;
1806 * Get a sendfile buf. We usually wait as long as necessary,
1807 * but this wait can be interrupted.
1809 if ((sf = sf_buf_alloc(pg)) == NULL) {
1810 vm_page_sbusy_drop(pg);
1811 /* vm_page_try_to_free(pg); */
1812 error = EINTR;
1813 goto done;
1817 * Get an mbuf header and set it up as having external storage.
1819 MGETHDR(m, M_WAITOK, MT_DATA);
1820 if (m == NULL) {
1821 error = ENOBUFS;
1822 vm_page_sbusy_drop(pg);
1823 /* vm_page_try_to_free(pg); */
1824 sf_buf_free(sf);
1825 goto done;
1828 m->m_ext.ext_free = sf_buf_mfree;
1829 m->m_ext.ext_ref = sf_buf_ref;
1830 m->m_ext.ext_arg = sf;
1831 m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
1832 m->m_ext.ext_size = PAGE_SIZE;
1833 m->m_data = (char *)sf_buf_kva(sf) + pgoff;
1834 m->m_flags |= M_EXT;
1835 m->m_pkthdr.len = m->m_len = xfsize;
1836 KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1838 if (mheader != NULL) {
1839 hbytes = mheader->m_pkthdr.len;
1840 mheader->m_pkthdr.len += m->m_pkthdr.len;
1841 m_cat(mheader, m);
1842 m = mheader;
1843 mheader = NULL;
1844 } else {
1845 hbytes = 0;
1849 * Add the buffer to the socket buffer chain.
1851 crit_enter();
1852 retry_space:
1854 * Make sure that the socket is still able to take more data.
1855 * CANTSENDMORE being true usually means that the connection
1856 * was closed. so_error is true when an error was sensed after
1857 * a previous send.
1858 * The state is checked after the page mapping and buffer
1859 * allocation above since those operations may block and make
1860 * any socket checks stale. From this point forward, nothing
1861 * blocks before the pru_send (or more accurately, any blocking
1862 * results in a loop back to here to re-check).
1864 if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1865 if (so->so_state & SS_CANTSENDMORE) {
1866 error = EPIPE;
1867 } else {
1868 error = so->so_error;
1869 so->so_error = 0;
1871 m_freem(m);
1872 crit_exit();
1873 goto done;
1876 * Wait for socket space to become available. We do this just
1877 * after checking the connection state above in order to avoid
1878 * a race condition with ssb_wait().
1880 if (so->so_snd.ssb_flags & SSB_PREALLOC)
1881 space = ssb_space_prealloc(&so->so_snd);
1882 else
1883 space = ssb_space(&so->so_snd);
1885 if (space < m->m_pkthdr.len && space < so->so_snd.ssb_lowat) {
1886 if (fp->f_flag & FNONBLOCK) {
1887 m_freem(m);
1888 crit_exit();
1889 error = EAGAIN;
1890 goto done;
1892 error = ssb_wait(&so->so_snd);
1894 * An error from ssb_wait usually indicates that we've
1895 * been interrupted by a signal. If we've sent anything
1896 * then return bytes sent, otherwise return the error.
1898 if (error) {
1899 m_freem(m);
1900 crit_exit();
1901 goto done;
1903 goto retry_space;
1906 if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1907 for (mp = m; mp != NULL; mp = mp->m_next)
1908 ssb_preallocstream(&so->so_snd, mp);
1910 if (use_sendfile_async)
1911 error = so_pru_senda(so, 0, m, NULL, NULL, td);
1912 else
1913 error = so_pru_send(so, 0, m, NULL, NULL, td);
1915 crit_exit();
1916 if (error)
1917 goto done;
1919 if (mheader != NULL) {
1920 *sbytes += mheader->m_pkthdr.len;
1922 if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1923 for (mp = mheader; mp != NULL; mp = mp->m_next)
1924 ssb_preallocstream(&so->so_snd, mp);
1926 if (use_sendfile_async)
1927 error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
1928 else
1929 error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1931 mheader = NULL;
1933 done:
1934 vm_object_drop(obj);
1935 ssb_unlock(&so->so_snd);
1936 done1:
1937 dropfp(td, sfd, fp);
1938 done0:
1939 if (mheader != NULL)
1940 m_freem(mheader);
1941 return (error);