sys/kern/uipc_syscalls.c
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
33 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
36 #include "opt_ktrace.h"
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/sysproto.h>
42 #include <sys/malloc.h>
43 #include <sys/filedesc.h>
44 #include <sys/event.h>
45 #include <sys/proc.h>
46 #include <sys/fcntl.h>
47 #include <sys/file.h>
48 #include <sys/filio.h>
49 #include <sys/kern_syscall.h>
50 #include <sys/mbuf.h>
51 #include <sys/protosw.h>
52 #include <sys/sfbuf.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/socketops.h>
56 #include <sys/uio.h>
57 #include <sys/vnode.h>
58 #include <sys/lock.h>
59 #include <sys/mount.h>
60 #include <sys/jail.h>
61 #ifdef KTRACE
62 #include <sys/ktrace.h>
63 #endif
64 #include <vm/vm.h>
65 #include <vm/vm_object.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_pageout.h>
68 #include <vm/vm_kern.h>
69 #include <vm/vm_extern.h>
70 #include <sys/file2.h>
71 #include <sys/signalvar.h>
72 #include <sys/serialize.h>
74 #include <sys/thread2.h>
75 #include <sys/msgport2.h>
76 #include <sys/socketvar2.h>
77 #include <net/netmsg2.h>
78 #include <vm/vm_page2.h>
80 extern int use_soaccept_pred_fast;
81 extern int use_sendfile_async;
82 extern int use_soconnect_async;
85 * System call interface to the socket abstraction.
88 extern struct fileops socketops;
91 * socket_args(int domain, int type, int protocol)
93 int
94 kern_socket(int domain, int type, int protocol, int *res)
96 struct thread *td = curthread;
97 struct filedesc *fdp = td->td_proc->p_fd;
98 struct socket *so;
99 struct file *fp;
100 int fd, error;
101 u_int fflags = 0;
102 int oflags = 0;
104 KKASSERT(td->td_lwp);
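/*
 * SOCK_NONBLOCK and SOCK_CLOEXEC are not passed down to socreate();
 * strip them from the type and remember them as descriptor flags to
 * apply once the socket has been created.
 */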
106 if (type & SOCK_NONBLOCK) {
107 type &= ~SOCK_NONBLOCK;
108 fflags |= FNONBLOCK;
110 if (type & SOCK_CLOEXEC) {
111 type &= ~SOCK_CLOEXEC;
112 oflags |= O_CLOEXEC;
115 error = falloc(td->td_lwp, &fp, &fd);
116 if (error)
117 return (error);
118 error = socreate(domain, &so, type, protocol, td);
119 if (error) {
120 fsetfd(fdp, NULL, fd);
121 } else {
122 fp->f_type = DTYPE_SOCKET;
123 fp->f_flag = FREAD | FWRITE | fflags;
124 fp->f_ops = &socketops;
125 fp->f_data = so;
126 if (oflags & O_CLOEXEC)
127 fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
128 *res = fd;
129 fsetfd(fdp, fp, fd);
131 fdrop(fp);
132 return (error);
136 * MPALMOSTSAFE
139 sys_socket(struct socket_args *uap)
141 int error;
143 error = kern_socket(uap->domain, uap->type, uap->protocol,
144 &uap->sysmsg_iresult);
146 return (error);
150 kern_bind(int s, struct sockaddr *sa)
152 struct thread *td = curthread;
153 struct file *fp;
154 int error;
156 error = holdsock(td, s, &fp);
157 if (error)
158 return (error);
159 error = sobind((struct socket *)fp->f_data, sa, td);
160 dropfp(td, s, fp);
162 return (error);
166 * bind_args(int s, caddr_t name, int namelen)
168 * MPALMOSTSAFE
171 sys_bind(struct bind_args *uap)
173 struct sockaddr *sa;
174 int error;
176 error = getsockaddr(&sa, uap->name, uap->namelen);
177 if (error)
178 return (error);
179 if (!prison_remote_ip(curthread, sa)) {
180 kfree(sa, M_SONAME);
181 return EAFNOSUPPORT;
183 error = kern_bind(uap->s, sa);
184 kfree(sa, M_SONAME);
186 return (error);
190 kern_listen(int s, int backlog)
192 struct thread *td = curthread;
193 struct file *fp;
194 int error;
196 error = holdsock(td, s, &fp);
197 if (error)
198 return (error);
199 error = solisten((struct socket *)fp->f_data, backlog, td);
200 dropfp(td, s, fp);
202 return (error);
206 * listen_args(int s, int backlog)
208 * MPALMOSTSAFE
211 sys_listen(struct listen_args *uap)
213 int error;
215 error = kern_listen(uap->s, uap->backlog);
216 return (error);
220 * Returns the accepted socket as well.
222 * NOTE! The sockets sitting on so_comp/so_incomp might have 0 refs, the
223 * pool token is absolutely required to avoid a sofree() race,
224 * as well as to avoid tailq handling races.
226 static boolean_t
227 soaccept_predicate(struct netmsg_so_notify *msg)
229 struct socket *head = msg->base.nm_so;
230 struct socket *so;
232 if (head->so_error != 0) {
233 msg->base.lmsg.ms_error = head->so_error;
234 return (TRUE);
236 lwkt_getpooltoken(head);
237 if (!TAILQ_EMPTY(&head->so_comp)) {
238 /* Abuse nm_so field as copy in/copy out parameter. XXX JH */
239 so = TAILQ_FIRST(&head->so_comp);
240 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);
241 TAILQ_REMOVE(&head->so_comp, so, so_list);
242 head->so_qlen--;
243 soclrstate(so, SS_COMP);
246 * Keep a reference before clearing the so_head
247 * to avoid racing socket close in netisr.
249 soreference(so);
250 so->so_head = NULL;
252 lwkt_relpooltoken(head);
254 msg->base.lmsg.ms_error = 0;
255 msg->base.nm_so = so;
256 return (TRUE);
258 lwkt_relpooltoken(head);
259 if (head->so_state & SS_CANTRCVMORE) {
260 msg->base.lmsg.ms_error = ECONNABORTED;
261 return (TRUE);
263 if (msg->nm_fflags & FNONBLOCK) {
264 msg->base.lmsg.ms_error = EWOULDBLOCK;
265 return (TRUE);
268 return (FALSE);
272 * The second argument to kern_accept() is a handle to a struct sockaddr.
273 * This allows kern_accept() to return a pointer to an allocated struct
274 * sockaddr which must be freed later with kfree(). The caller must
275 * initialize *name to NULL.
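 *
 * Caller sketch (mirroring the sys_accept*() wrappers below): pass a
 * pointer to a NULL sockaddr pointer plus an in/out length, check the
 * returned error, and release the returned name with kfree(sa, M_SONAME).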
278 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res,
279 int sockflags)
281 struct thread *td = curthread;
282 struct filedesc *fdp = td->td_proc->p_fd;
283 struct file *lfp = NULL;
284 struct file *nfp = NULL;
285 struct sockaddr *sa;
286 struct socket *head, *so;
287 struct netmsg_so_notify msg;
288 int fd;
289 u_int fflag; /* type must match fp->f_flag */
290 int error, tmp;
292 *res = -1;
293 if (name && namelen && *namelen < 0)
294 return (EINVAL);
296 error = holdsock(td, s, &lfp);
297 if (error)
298 return (error);
300 error = falloc(td->td_lwp, &nfp, &fd);
301 if (error) { /* Probably ran out of file descriptors. */
302 fdrop(lfp);
303 return (error);
305 head = (struct socket *)lfp->f_data;
306 if ((head->so_options & SO_ACCEPTCONN) == 0) {
307 error = EINVAL;
308 goto done;
311 if (fflags & O_FBLOCKING)
312 fflags |= lfp->f_flag & ~FNONBLOCK;
313 else if (fflags & O_FNONBLOCKING)
314 fflags |= lfp->f_flag | FNONBLOCK;
315 else
316 fflags = lfp->f_flag;
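/*
 * Fast path: when enabled, run the accept predicate directly under the
 * listen socket's pool token.  If no completed connection is ready we
 * fall through to the netmsg-based wait below.
 */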
318 if (use_soaccept_pred_fast) {
319 boolean_t pred;
321 /* Initialize necessary parts for soaccept_predicate() */
322 netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
323 msg.nm_fflags = fflags;
325 lwkt_getpooltoken(head);
326 pred = soaccept_predicate(&msg);
327 lwkt_relpooltoken(head);
329 if (pred) {
330 error = msg.base.lmsg.ms_error;
331 if (error)
332 goto done;
333 else
334 goto accepted;
338 /* optimize for uniprocessor case later XXX JH */
339 netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
340 0, netmsg_so_notify, netmsg_so_notify_doabort);
341 msg.nm_predicate = soaccept_predicate;
342 msg.nm_fflags = fflags;
343 msg.nm_etype = NM_REVENT;
344 error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
345 if (error)
346 goto done;
348 accepted:
350 * At this point we have the connection that's ready to be accepted.
352 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
353 * to eat the ref and turn it into a descriptor.
355 so = msg.base.nm_so;
357 fflag = lfp->f_flag;
359 /* connection has been removed from the listen queue */
360 KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);
362 if (sockflags & SOCK_KERN_NOINHERIT) {
363 fflag &= ~(FASYNC | FNONBLOCK);
364 if (sockflags & SOCK_NONBLOCK)
365 fflag |= FNONBLOCK;
366 } else {
367 if (head->so_sigio != NULL)
368 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
371 nfp->f_type = DTYPE_SOCKET;
372 nfp->f_flag = fflag;
373 nfp->f_ops = &socketops;
374 nfp->f_data = so;
375 /* Sync socket async state with file flags */
376 tmp = fflag & FASYNC;
377 fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);
379 sa = NULL;
380 if (so->so_faddr != NULL) {
381 sa = so->so_faddr;
382 so->so_faddr = NULL;
384 soaccept_generic(so);
385 error = 0;
386 } else {
387 error = soaccept(so, &sa);
391 * Set the returned name and namelen as applicable. Set the returned
392 * namelen to 0 for older code which might ignore the return value
393 * from accept.
395 if (error == 0) {
396 if (sa && name && namelen) {
397 if (*namelen > sa->sa_len)
398 *namelen = sa->sa_len;
399 *name = sa;
400 } else {
401 if (sa)
402 kfree(sa, M_SONAME);
406 done:
408 * If an error occurred, clear the reserved descriptor, else associate
409 * nfp with it.
411 * Note that *res is normally ignored if an error is returned but
412 * a syscall message will still have access to the result code.
414 if (error) {
415 fsetfd(fdp, NULL, fd);
416 } else {
417 if (sockflags & SOCK_CLOEXEC)
418 fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
419 *res = fd;
420 fsetfd(fdp, nfp, fd);
422 fdrop(nfp);
423 dropfp(td, s, lfp);
425 return (error);
429 * accept(int s, caddr_t name, int *anamelen)
431 * MPALMOSTSAFE
434 sys_accept(struct accept_args *uap)
436 struct sockaddr *sa = NULL;
437 int sa_len;
438 int error;
440 if (uap->name) {
441 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
442 if (error)
443 return (error);
445 error = kern_accept(uap->s, 0, &sa, &sa_len,
446 &uap->sysmsg_iresult, 0);
448 if (error == 0) {
449 prison_local_ip(curthread, sa);
450 error = copyout(sa, uap->name, sa_len);
452 if (error == 0) {
453 error = copyout(&sa_len, uap->anamelen,
454 sizeof(*uap->anamelen));
456 if (sa)
457 kfree(sa, M_SONAME);
458 } else {
459 error = kern_accept(uap->s, 0, NULL, 0,
460 &uap->sysmsg_iresult, 0);
462 return (error);
466 * extaccept(int s, int fflags, caddr_t name, int *anamelen)
468 * MPALMOSTSAFE
471 sys_extaccept(struct extaccept_args *uap)
473 struct sockaddr *sa = NULL;
474 int sa_len;
475 int error;
476 int fflags = uap->flags & O_FMASK;
478 if (uap->name) {
479 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
480 if (error)
481 return (error);
483 error = kern_accept(uap->s, fflags, &sa, &sa_len,
484 &uap->sysmsg_iresult, 0);
486 if (error == 0) {
487 prison_local_ip(curthread, sa);
488 error = copyout(sa, uap->name, sa_len);
490 if (error == 0) {
491 error = copyout(&sa_len, uap->anamelen,
492 sizeof(*uap->anamelen));
494 if (sa)
495 kfree(sa, M_SONAME);
496 } else {
497 error = kern_accept(uap->s, fflags, NULL, 0,
498 &uap->sysmsg_iresult, 0);
500 return (error);
504 * accept4(int s, caddr_t name, int *anamelen, int flags)
506 * MPALMOSTSAFE
509 sys_accept4(struct accept4_args *uap)
511 struct sockaddr *sa = NULL;
512 int sa_len;
513 int error;
514 int sockflags;
516 if (uap->flags & ~(SOCK_NONBLOCK | SOCK_CLOEXEC))
517 return (EINVAL);
518 sockflags = uap->flags | SOCK_KERN_NOINHERIT;
520 if (uap->name) {
521 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
522 if (error)
523 return (error);
525 error = kern_accept(uap->s, 0, &sa, &sa_len,
526 &uap->sysmsg_iresult, sockflags);
528 if (error == 0) {
529 prison_local_ip(curthread, sa);
530 error = copyout(sa, uap->name, sa_len);
532 if (error == 0) {
533 error = copyout(&sa_len, uap->anamelen,
534 sizeof(*uap->anamelen));
536 if (sa)
537 kfree(sa, M_SONAME);
538 } else {
539 error = kern_accept(uap->s, 0, NULL, 0,
540 &uap->sysmsg_iresult, sockflags);
542 return (error);
546 * Returns TRUE if predicate satisfied.
548 static boolean_t
549 soconnected_predicate(struct netmsg_so_notify *msg)
551 struct socket *so = msg->base.nm_so;
553 /* check predicate */
554 if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
555 msg->base.lmsg.ms_error = so->so_error;
556 return (TRUE);
559 return (FALSE);
563 kern_connect(int s, int fflags, struct sockaddr *sa)
565 struct thread *td = curthread;
566 struct file *fp;
567 struct socket *so;
568 int error, interrupted = 0;
570 error = holdsock(td, s, &fp);
571 if (error)
572 return (error);
573 so = (struct socket *)fp->f_data;
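/*
 * Resolve the effective blocking mode: O_FBLOCKING and O_FNONBLOCKING
 * (from the extended syscalls) override the descriptor's f_flag, which
 * is otherwise inherited as-is.
 */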
575 if (fflags & O_FBLOCKING)
576 /* fflags &= ~FNONBLOCK; */;
577 else if (fflags & O_FNONBLOCKING)
578 fflags |= FNONBLOCK;
579 else
580 fflags = fp->f_flag;
582 if (so->so_state & SS_ISCONNECTING) {
583 error = EALREADY;
584 goto done;
586 error = soconnect(so, sa, td, use_soconnect_async ? FALSE : TRUE);
587 if (error)
588 goto bad;
589 if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
590 error = EINPROGRESS;
591 goto done;
593 if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
594 struct netmsg_so_notify msg;
596 netmsg_init_abortable(&msg.base, so,
597 &curthread->td_msgport,
599 netmsg_so_notify,
600 netmsg_so_notify_doabort);
601 msg.nm_predicate = soconnected_predicate;
602 msg.nm_etype = NM_REVENT;
603 error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
604 if (error == EINTR || error == ERESTART)
605 interrupted = 1;
607 if (error == 0) {
608 error = so->so_error;
609 so->so_error = 0;
611 bad:
612 if (!interrupted)
613 soclrstate(so, SS_ISCONNECTING);
614 if (error == ERESTART)
615 error = EINTR;
616 done:
617 dropfp(td, s, fp);
619 return (error);
623 * connect_args(int s, caddr_t name, int namelen)
625 * MPALMOSTSAFE
628 sys_connect(struct connect_args *uap)
630 struct sockaddr *sa;
631 int error;
633 error = getsockaddr(&sa, uap->name, uap->namelen);
634 if (error)
635 return (error);
636 if (!prison_remote_ip(curthread, sa)) {
637 kfree(sa, M_SONAME);
638 return EAFNOSUPPORT;
640 error = kern_connect(uap->s, 0, sa);
641 kfree(sa, M_SONAME);
643 return (error);
647 * extconnect_args(int s, int fflags, caddr_t name, int namelen)
649 * MPALMOSTSAFE
652 sys_extconnect(struct extconnect_args *uap)
654 struct sockaddr *sa;
655 int error;
656 int fflags = uap->flags & O_FMASK;
658 error = getsockaddr(&sa, uap->name, uap->namelen);
659 if (error)
660 return (error);
661 if (!prison_remote_ip(curthread, sa)) {
662 kfree(sa, M_SONAME);
663 return EAFNOSUPPORT;
665 error = kern_connect(uap->s, fflags, sa);
666 kfree(sa, M_SONAME);
668 return (error);
672 kern_socketpair(int domain, int type, int protocol, int *sv)
674 struct thread *td = curthread;
675 struct filedesc *fdp;
676 struct file *fp1, *fp2;
677 struct socket *so1, *so2;
678 int fd1, fd2, error;
679 u_int fflags = 0;
680 int oflags = 0;
682 if (type & SOCK_NONBLOCK) {
683 type &= ~SOCK_NONBLOCK;
684 fflags |= FNONBLOCK;
686 if (type & SOCK_CLOEXEC) {
687 type &= ~SOCK_CLOEXEC;
688 oflags |= O_CLOEXEC;
691 fdp = td->td_proc->p_fd;
692 error = socreate(domain, &so1, type, protocol, td);
693 if (error)
694 return (error);
695 error = socreate(domain, &so2, type, protocol, td);
696 if (error)
697 goto free1;
698 error = falloc(td->td_lwp, &fp1, &fd1);
699 if (error)
700 goto free2;
701 sv[0] = fd1;
702 fp1->f_data = so1;
703 error = falloc(td->td_lwp, &fp2, &fd2);
704 if (error)
705 goto free3;
706 fp2->f_data = so2;
707 sv[1] = fd2;
708 error = soconnect2(so1, so2);
709 if (error)
710 goto free4;
711 if (type == SOCK_DGRAM) {
713 * Datagram socket connection is asymmetric.
715 error = soconnect2(so2, so1);
716 if (error)
717 goto free4;
719 fp1->f_type = fp2->f_type = DTYPE_SOCKET;
720 fp1->f_flag = fp2->f_flag = FREAD|FWRITE|fflags;
721 fp1->f_ops = fp2->f_ops = &socketops;
722 if (oflags & O_CLOEXEC) {
723 fdp->fd_files[fd1].fileflags |= UF_EXCLOSE;
724 fdp->fd_files[fd2].fileflags |= UF_EXCLOSE;
726 fsetfd(fdp, fp1, fd1);
727 fsetfd(fdp, fp2, fd2);
728 fdrop(fp1);
729 fdrop(fp2);
730 return (error);
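/*
 * Error unwinding: back out in reverse order of construction, clearing
 * the reserved descriptors before closing the sockets themselves.
 */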
731 free4:
732 fsetfd(fdp, NULL, fd2);
733 fdrop(fp2);
734 free3:
735 fsetfd(fdp, NULL, fd1);
736 fdrop(fp1);
737 free2:
738 (void)soclose(so2, 0);
739 free1:
740 (void)soclose(so1, 0);
741 return (error);
745 * socketpair(int domain, int type, int protocol, int *rsv)
748 sys_socketpair(struct socketpair_args *uap)
750 int error, sockv[2];
752 error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
754 if (error == 0) {
755 error = copyout(sockv, uap->rsv, sizeof(sockv));
757 if (error != 0) {
758 kern_close(sockv[0]);
759 kern_close(sockv[1]);
763 return (error);
767 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
768 struct mbuf *control, int flags, size_t *res)
770 struct thread *td = curthread;
771 struct lwp *lp = td->td_lwp;
772 struct proc *p = td->td_proc;
773 struct file *fp;
774 size_t len;
775 int error;
776 struct socket *so;
777 #ifdef KTRACE
778 struct iovec *ktriov = NULL;
779 struct uio ktruio;
780 #endif
782 error = holdsock(td, s, &fp);
783 if (error)
784 return (error);
785 #ifdef KTRACE
786 if (KTRPOINT(td, KTR_GENIO)) {
787 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
789 ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
790 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
791 ktruio = *auio;
793 #endif
794 len = auio->uio_resid;
795 so = (struct socket *)fp->f_data;
796 if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
797 if (fp->f_flag & FNONBLOCK)
798 flags |= MSG_FNONBLOCKING;
800 error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
801 if (error) {
802 if (auio->uio_resid != len && (error == ERESTART ||
803 error == EINTR || error == EWOULDBLOCK))
804 error = 0;
805 if (error == EPIPE && !(flags & MSG_NOSIGNAL) &&
806 !(so->so_options & SO_NOSIGPIPE))
807 lwpsignal(p, lp, SIGPIPE);
809 #ifdef KTRACE
810 if (ktriov != NULL) {
811 if (error == 0) {
812 ktruio.uio_iov = ktriov;
813 ktruio.uio_resid = len - auio->uio_resid;
814 ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
816 kfree(ktriov, M_TEMP);
818 #endif
819 if (error == 0)
820 *res = len - auio->uio_resid;
821 dropfp(td, s, fp);
823 return (error);
827 * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
829 * MPALMOSTSAFE
832 sys_sendto(struct sendto_args *uap)
834 struct thread *td = curthread;
835 struct uio auio;
836 struct iovec aiov;
837 struct sockaddr *sa = NULL;
838 int error;
840 if (uap->to) {
841 error = getsockaddr(&sa, uap->to, uap->tolen);
842 if (error)
843 return (error);
844 if (!prison_remote_ip(curthread, sa)) {
845 kfree(sa, M_SONAME);
846 return EAFNOSUPPORT;
849 aiov.iov_base = uap->buf;
850 aiov.iov_len = uap->len;
851 auio.uio_iov = &aiov;
852 auio.uio_iovcnt = 1;
853 auio.uio_offset = 0;
854 auio.uio_resid = uap->len;
855 auio.uio_segflg = UIO_USERSPACE;
856 auio.uio_rw = UIO_WRITE;
857 auio.uio_td = td;
859 error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
860 &uap->sysmsg_szresult);
862 if (sa)
863 kfree(sa, M_SONAME);
864 return (error);
868 * sendmsg_args(int s, caddr_t msg, int flags)
870 * MPALMOSTSAFE
873 sys_sendmsg(struct sendmsg_args *uap)
875 struct thread *td = curthread;
876 struct msghdr msg;
877 struct uio auio;
878 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
879 struct sockaddr *sa = NULL;
880 struct mbuf *control = NULL;
881 int error;
883 error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
884 if (error)
885 return (error);
888 * Conditionally copyin msg.msg_name.
890 if (msg.msg_name) {
891 error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
892 if (error)
893 return (error);
894 if (!prison_remote_ip(curthread, sa)) {
895 kfree(sa, M_SONAME);
896 return EAFNOSUPPORT;
901 * Populate auio.
903 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
904 &auio.uio_resid);
905 if (error)
906 goto cleanup2;
907 auio.uio_iov = iov;
908 auio.uio_iovcnt = msg.msg_iovlen;
909 auio.uio_offset = 0;
910 auio.uio_segflg = UIO_USERSPACE;
911 auio.uio_rw = UIO_WRITE;
912 auio.uio_td = td;
915 * Conditionally copyin msg.msg_control.
917 if (msg.msg_control) {
918 if (msg.msg_controllen < sizeof(struct cmsghdr) ||
919 msg.msg_controllen > MLEN) {
920 error = EINVAL;
921 goto cleanup;
923 control = m_get(M_WAITOK, MT_CONTROL);
924 if (control == NULL) {
925 error = ENOBUFS;
926 goto cleanup;
928 control->m_len = msg.msg_controllen;
929 error = copyin(msg.msg_control, mtod(control, caddr_t),
930 msg.msg_controllen);
931 if (error) {
932 m_free(control);
933 goto cleanup;
937 error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
938 &uap->sysmsg_szresult);
940 cleanup:
941 iovec_free(&iov, aiov);
942 cleanup2:
943 if (sa)
944 kfree(sa, M_SONAME);
945 return (error);
949 * kern_recvmsg() takes a handle to sa and control. If the handle is non-
950 * null, it returns a dynamically allocated struct sockaddr and an mbuf.
951 * Don't forget to kfree() and m_free() these if they are returned.
954 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
955 struct mbuf **control, int *flags, size_t *res)
957 struct thread *td = curthread;
958 struct file *fp;
959 size_t len;
960 int error;
961 int lflags;
962 struct socket *so;
963 #ifdef KTRACE
964 struct iovec *ktriov = NULL;
965 struct uio ktruio;
966 #endif
968 error = holdsock(td, s, &fp);
969 if (error)
970 return (error);
971 #ifdef KTRACE
972 if (KTRPOINT(td, KTR_GENIO)) {
973 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
975 ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
976 bcopy(auio->uio_iov, ktriov, iovlen);
977 ktruio = *auio;
979 #endif
980 len = auio->uio_resid;
981 so = (struct socket *)fp->f_data;
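/*
 * If the caller did not force a blocking mode, derive it from the
 * descriptor: a non-blocking fd becomes MSG_FNONBLOCKING, using a
 * local flags word when the caller passed flags == NULL.
 */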
983 if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
984 if (fp->f_flag & FNONBLOCK) {
985 if (flags) {
986 *flags |= MSG_FNONBLOCKING;
987 } else {
988 lflags = MSG_FNONBLOCKING;
989 flags = &lflags;
994 error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
995 if (error) {
996 if (auio->uio_resid != len && (error == ERESTART ||
997 error == EINTR || error == EWOULDBLOCK))
998 error = 0;
1000 #ifdef KTRACE
1001 if (ktriov != NULL) {
1002 if (error == 0) {
1003 ktruio.uio_iov = ktriov;
1004 ktruio.uio_resid = len - auio->uio_resid;
1005 ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
1007 kfree(ktriov, M_TEMP);
1009 #endif
1010 if (error == 0)
1011 *res = len - auio->uio_resid;
1012 dropfp(td, s, fp);
1014 return (error);
1018 * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
1019 * caddr_t from, int *fromlenaddr)
1021 * MPALMOSTSAFE
1024 sys_recvfrom(struct recvfrom_args *uap)
1026 struct thread *td = curthread;
1027 struct uio auio;
1028 struct iovec aiov;
1029 struct sockaddr *sa = NULL;
1030 int error, fromlen;
1032 if (uap->from && uap->fromlenaddr) {
1033 error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
1034 if (error)
1035 return (error);
1036 if (fromlen < 0)
1037 return (EINVAL);
1038 } else {
1039 fromlen = 0;
1041 aiov.iov_base = uap->buf;
1042 aiov.iov_len = uap->len;
1043 auio.uio_iov = &aiov;
1044 auio.uio_iovcnt = 1;
1045 auio.uio_offset = 0;
1046 auio.uio_resid = uap->len;
1047 auio.uio_segflg = UIO_USERSPACE;
1048 auio.uio_rw = UIO_READ;
1049 auio.uio_td = td;
1051 error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
1052 &uap->flags, &uap->sysmsg_szresult);
1054 if (error == 0 && uap->from) {
1055 /* note: sa may still be NULL */
1056 if (sa) {
1057 fromlen = MIN(fromlen, sa->sa_len);
1058 prison_local_ip(curthread, sa);
1059 error = copyout(sa, uap->from, fromlen);
1060 } else {
1061 fromlen = 0;
1063 if (error == 0) {
1064 error = copyout(&fromlen, uap->fromlenaddr,
1065 sizeof(fromlen));
1068 if (sa)
1069 kfree(sa, M_SONAME);
1071 return (error);
1075 * recvmsg_args(int s, struct msghdr *msg, int flags)
1077 * MPALMOSTSAFE
1080 sys_recvmsg(struct recvmsg_args *uap)
1082 struct thread *td = curthread;
1083 struct msghdr msg;
1084 struct uio auio;
1085 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1086 struct mbuf *m, *control = NULL;
1087 struct sockaddr *sa = NULL;
1088 caddr_t ctlbuf;
1089 socklen_t *ufromlenp, *ucontrollenp;
1090 int error, fromlen, controllen, len, flags, *uflagsp;
1093 * This copyin handles everything except the iovec.
1095 error = copyin(uap->msg, &msg, sizeof(msg));
1096 if (error)
1097 return (error);
1099 if (msg.msg_name && msg.msg_namelen < 0)
1100 return (EINVAL);
1101 if (msg.msg_control && msg.msg_controllen < 0)
1102 return (EINVAL);
1104 ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1105 msg_namelen));
1106 ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1107 msg_controllen));
1108 uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
1109 msg_flags));
1112 * Populate auio.
1114 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
1115 &auio.uio_resid);
1116 if (error)
1117 return (error);
1118 auio.uio_iov = iov;
1119 auio.uio_iovcnt = msg.msg_iovlen;
1120 auio.uio_offset = 0;
1121 auio.uio_segflg = UIO_USERSPACE;
1122 auio.uio_rw = UIO_READ;
1123 auio.uio_td = td;
1125 flags = uap->flags;
1127 error = kern_recvmsg(uap->s,
1128 (msg.msg_name ? &sa : NULL), &auio,
1129 (msg.msg_control ? &control : NULL), &flags,
1130 &uap->sysmsg_szresult);
1133 * Conditionally copyout the name and populate the namelen field.
1135 if (error == 0 && msg.msg_name) {
1136 /* note: sa may still be NULL */
1137 if (sa != NULL) {
1138 fromlen = MIN(msg.msg_namelen, sa->sa_len);
1139 prison_local_ip(curthread, sa);
1140 error = copyout(sa, msg.msg_name, fromlen);
1141 } else {
1142 fromlen = 0;
1144 if (error == 0)
1145 error = copyout(&fromlen, ufromlenp,
1146 sizeof(*ufromlenp));
1150 * Copyout msg.msg_control and msg.msg_controllen.
1152 if (error == 0 && msg.msg_control) {
1153 len = msg.msg_controllen;
1154 m = control;
1155 ctlbuf = (caddr_t)msg.msg_control;
1157 while(m && len > 0) {
1158 unsigned int tocopy;
1160 if (len >= m->m_len) {
1161 tocopy = m->m_len;
1162 } else {
1163 msg.msg_flags |= MSG_CTRUNC;
1164 tocopy = len;
1167 error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1168 if (error)
1169 goto cleanup;
1171 ctlbuf += tocopy;
1172 len -= tocopy;
1173 m = m->m_next;
1175 controllen = ctlbuf - (caddr_t)msg.msg_control;
1176 error = copyout(&controllen, ucontrollenp,
1177 sizeof(*ucontrollenp));
1180 if (error == 0)
1181 error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1183 cleanup:
1184 if (sa)
1185 kfree(sa, M_SONAME);
1186 iovec_free(&iov, aiov);
1187 if (control)
1188 m_freem(control);
1189 return (error);
1193 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1194 * in kernel pointer instead of a userland pointer. This allows us
1195 * to manipulate socket options in the emulation code.
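 *
 * In-kernel usage sketch (values are illustrative, not taken from this
 * file): with sopt_td == NULL the option value lives in kernel memory.
 *
 *	struct sockopt sopt;
 *	int on = 1;
 *
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_REUSEADDR;
 *	sopt.sopt_val = &on;
 *	sopt.sopt_valsize = sizeof(on);
 *	sopt.sopt_td = NULL;
 *	error = kern_setsockopt(s, &sopt);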
1198 kern_setsockopt(int s, struct sockopt *sopt)
1200 struct thread *td = curthread;
1201 struct file *fp;
1202 int error;
1204 if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1205 return (EFAULT);
1206 if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1207 return (EINVAL);
1208 if (sopt->sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1209 return (EINVAL);
1211 error = holdsock(td, s, &fp);
1212 if (error)
1213 return (error);
1215 error = sosetopt((struct socket *)fp->f_data, sopt);
1216 dropfp(td, s, fp);
1218 return (error);
1222 * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1224 * MPALMOSTSAFE
1227 sys_setsockopt(struct setsockopt_args *uap)
1229 struct thread *td = curthread;
1230 struct sockopt sopt;
1231 int error;
1233 sopt.sopt_level = uap->level;
1234 sopt.sopt_name = uap->name;
1235 sopt.sopt_valsize = uap->valsize;
1236 sopt.sopt_td = td;
1237 sopt.sopt_val = NULL;
1239 if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1240 return (EINVAL);
1241 if (uap->val) {
1242 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1243 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1244 if (error)
1245 goto out;
1248 error = kern_setsockopt(uap->s, &sopt);
1249 out:
1250 if (uap->val)
1251 kfree(sopt.sopt_val, M_TEMP);
1252 return(error);
1256 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1257 * in kernel pointer instead of a userland pointer. This allows us
1258 * to manipulate socket options in the emulation code.
1261 kern_getsockopt(int s, struct sockopt *sopt)
1263 struct thread *td = curthread;
1264 struct file *fp;
1265 int error;
1267 if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1268 return (EFAULT);
1269 if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1270 return (EINVAL);
1272 error = holdsock(td, s, &fp);
1273 if (error)
1274 return (error);
1276 error = sogetopt((struct socket *)fp->f_data, sopt);
1277 dropfp(td, s, fp);
1279 return (error);
1283 * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1285 * MPALMOSTSAFE
1288 sys_getsockopt(struct getsockopt_args *uap)
1290 struct thread *td = curthread;
1291 struct sockopt sopt;
1292 int error, valsize, valszmax, mflag = 0;
1294 if (uap->val) {
1295 error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1296 if (error)
1297 return (error);
1298 } else {
1299 valsize = 0;
1302 sopt.sopt_level = uap->level;
1303 sopt.sopt_name = uap->name;
1304 sopt.sopt_valsize = valsize;
1305 sopt.sopt_td = td;
1306 sopt.sopt_val = NULL;
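/*
 * Root may use the larger SOMAXOPT_SIZE0 limit and an M_NULLOK
 * allocation; unprivileged callers are capped at SOMAXOPT_SIZE.
 */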
1308 if (td->td_proc->p_ucred->cr_uid == 0) {
1309 valszmax = SOMAXOPT_SIZE0;
1310 mflag = M_NULLOK;
1311 } else {
1312 valszmax = SOMAXOPT_SIZE;
1314 if (sopt.sopt_valsize > valszmax) /* unsigned */
1315 return (EINVAL);
1316 if (uap->val) {
1317 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP,
1318 M_WAITOK | mflag);
1319 if (sopt.sopt_val == NULL)
1320 return (ENOBUFS);
1321 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1322 if (error)
1323 goto out;
1326 error = kern_getsockopt(uap->s, &sopt);
1327 if (error)
1328 goto out;
1329 valsize = sopt.sopt_valsize;
1330 error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1331 if (error)
1332 goto out;
1333 if (uap->val)
1334 error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1335 out:
1336 if (uap->val)
1337 kfree(sopt.sopt_val, M_TEMP);
1338 return (error);
1342 * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1343 * This allows kern_getsockname() to return a pointer to an allocated struct
1344 * sockaddr which must be freed later with kfree(). The caller must
1345 * initialize *name to NULL.
1348 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1350 struct thread *td = curthread;
1351 struct file *fp;
1352 struct socket *so;
1353 struct sockaddr *sa = NULL;
1354 int error;
1356 error = holdsock(td, s, &fp);
1357 if (error)
1358 return (error);
1359 if (*namelen < 0) {
1360 fdrop(fp);
1361 return (EINVAL);
1363 so = (struct socket *)fp->f_data;
1364 error = so_pru_sockaddr(so, &sa);
1365 if (error == 0) {
1366 if (sa == NULL) {
1367 *namelen = 0;
1368 } else {
1369 *namelen = MIN(*namelen, sa->sa_len);
1370 *name = sa;
1373 dropfp(td, s, fp);
1375 return (error);
1379 * getsockname_args(int fdes, caddr_t asa, int *alen)
1381 * Get socket name.
1383 * MPALMOSTSAFE
1386 sys_getsockname(struct getsockname_args *uap)
1388 struct sockaddr *sa = NULL;
1389 struct sockaddr satmp;
1390 int error, sa_len_in, sa_len_out;
1392 error = copyin(uap->alen, &sa_len_in, sizeof(sa_len_in));
1393 if (error)
1394 return (error);
1396 sa_len_out = sa_len_in;
1397 error = kern_getsockname(uap->fdes, &sa, &sa_len_out);
1399 if (error == 0) {
1400 if (sa) {
1401 prison_local_ip(curthread, sa);
1402 error = copyout(sa, uap->asa, sa_len_out);
1403 } else {
1405 * unnamed uipc sockets don't bother storing
1406 * sockaddr, simulate an AF_LOCAL sockaddr.
1408 sa_len_out = sizeof(satmp);
1409 if (sa_len_out > sa_len_in)
1410 sa_len_out = sa_len_in;
1411 if (sa_len_out < 0)
1412 sa_len_out = 0;
1413 bzero(&satmp, sizeof(satmp));
1414 satmp.sa_len = sa_len_out;
1415 satmp.sa_family = AF_LOCAL;
1416 error = copyout(&satmp, uap->asa, sa_len_out);
1419 if (error == 0 && sa_len_out != sa_len_in)
1420 error = copyout(&sa_len_out, uap->alen, sizeof(*uap->alen));
1421 if (sa)
1422 kfree(sa, M_SONAME);
1423 return (error);
1427 * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1428 * This allows kern_getpeername() to return a pointer to an allocated struct
1429 * sockaddr which must be freed later with kfree(). The caller must
1430 * initialize *name to NULL.
1433 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1435 struct thread *td = curthread;
1436 struct file *fp;
1437 struct socket *so;
1438 struct sockaddr *sa = NULL;
1439 int error;
1441 error = holdsock(td, s, &fp);
1442 if (error)
1443 return (error);
1444 if (*namelen < 0) {
1445 fdrop(fp);
1446 return (EINVAL);
1448 so = (struct socket *)fp->f_data;
1449 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1450 fdrop(fp);
1451 return (ENOTCONN);
1453 error = so_pru_peeraddr(so, &sa);
1454 if (error == 0) {
1455 if (sa == NULL) {
1456 *namelen = 0;
1457 } else {
1458 *namelen = MIN(*namelen, sa->sa_len);
1459 *name = sa;
1462 dropfp(td, s, fp);
1464 return (error);
1468 * getpeername_args(int fdes, caddr_t asa, int *alen)
1470 * Get name of peer for connected socket.
1472 * MPALMOSTSAFE
1475 sys_getpeername(struct getpeername_args *uap)
1477 struct sockaddr *sa = NULL;
1478 int error, sa_len;
1480 error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1481 if (error)
1482 return (error);
1484 error = kern_getpeername(uap->fdes, &sa, &sa_len);
1486 if (error == 0) {
1487 prison_local_ip(curthread, sa);
1488 error = copyout(sa, uap->asa, sa_len);
1490 if (error == 0)
1491 error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1492 if (sa)
1493 kfree(sa, M_SONAME);
1494 return (error);
1498 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1500 struct sockaddr *sa;
1501 int error;
1503 *namp = NULL;
1504 if (len > SOCK_MAXADDRLEN)
1505 return ENAMETOOLONG;
1506 if (len < offsetof(struct sockaddr, sa_data[0]))
1507 return EDOM;
1508 sa = kmalloc(len, M_SONAME, M_WAITOK);
1509 error = copyin(uaddr, sa, len);
1510 if (error) {
1511 kfree(sa, M_SONAME);
1512 } else {
1513 sa->sa_len = len;
1514 *namp = sa;
1516 return error;
1520 * Detach a mapped page and release resources back to the system.
1521 * We must release our wiring and if the object is ripped out
1522 * from under the vm_page we become responsible for freeing the
1523 * page.
1525 * MPSAFE
1527 static void
1528 sf_buf_mfree(void *arg)
1530 struct sf_buf *sf = arg;
1531 vm_page_t m;
1533 m = sf_buf_page(sf);
1534 if (sf_buf_free(sf)) {
1535 /* sf invalid now */
1536 vm_page_sbusy_drop(m);
1537 #if 0
1538 if (m->object == NULL &&
1539 m->wire_count == 0 &&
1540 (m->flags & PG_NEED_COMMIT) == 0) {
1541 vm_page_free(m);
1542 } else {
1543 vm_page_wakeup(m);
1545 #endif
1550 * sendfile(2).
1551 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1552 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1554 * Send a file specified by 'fd' and starting at 'offset' to a socket
1555 * specified by 's'. Send only 'nbytes' of the file or until EOF if
1556 * nbytes == 0. Optionally add a header and/or trailer to the socket
1557 * output. If specified, write the total number of bytes sent into *sbytes.
1559 * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1560 * the headers to count against the remaining bytes to be sent from
1561 * the file descriptor. We may wish to implement a compatibility syscall
1562 * in the future.
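 *
 * Userland usage sketch (illustrative only; filefd/sockfd are placeholders),
 * matching the prototype above:
 *
 *	off_t sbytes;
 *
 *	if (sendfile(filefd, sockfd, 0, 0, NULL, &sbytes, 0) < 0)
 *		err(1, "sendfile");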
1564 * MPALMOSTSAFE
1567 sys_sendfile(struct sendfile_args *uap)
1569 struct thread *td = curthread;
1570 struct file *fp;
1571 struct vnode *vp = NULL;
1572 struct sf_hdtr hdtr;
1573 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1574 struct uio auio;
1575 struct mbuf *mheader = NULL;
1576 size_t hbytes = 0;
1577 size_t tbytes;
1578 off_t hdtr_size = 0;
1579 off_t sbytes;
1580 int error;
1583 * Do argument checking. Must be a regular file in, stream
1584 * type and connected socket out, positive offset.
1586 fp = holdfp(td, uap->fd, FREAD);
1587 if (fp == NULL) {
1588 return (EBADF);
1590 if (fp->f_type != DTYPE_VNODE) {
1591 fdrop(fp);
1592 return (EINVAL);
1594 vp = (struct vnode *)fp->f_data;
1595 vref(vp);
1596 dropfp(td, uap->fd, fp);
1599 * If specified, get the pointer to the sf_hdtr struct for
1600 * any headers/trailers.
1602 if (uap->hdtr) {
1603 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1604 if (error)
1605 goto done;
1607 * Send any headers.
1609 if (hdtr.headers) {
1610 error = iovec_copyin(hdtr.headers, &iov, aiov,
1611 hdtr.hdr_cnt, &hbytes);
1612 if (error)
1613 goto done;
1614 auio.uio_iov = iov;
1615 auio.uio_iovcnt = hdtr.hdr_cnt;
1616 auio.uio_offset = 0;
1617 auio.uio_segflg = UIO_USERSPACE;
1618 auio.uio_rw = UIO_WRITE;
1619 auio.uio_td = td;
1620 auio.uio_resid = hbytes;
1622 mheader = m_uiomove(&auio);
1624 iovec_free(&iov, aiov);
1625 if (mheader == NULL)
1626 goto done;
1630 error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1631 &sbytes, uap->flags);
1632 if (error)
1633 goto done;
1636 * Send trailers. Wimp out and use writev(2).
1638 if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1639 error = iovec_copyin(hdtr.trailers, &iov, aiov,
1640 hdtr.trl_cnt, &auio.uio_resid);
1641 if (error)
1642 goto done;
1643 auio.uio_iov = iov;
1644 auio.uio_iovcnt = hdtr.trl_cnt;
1645 auio.uio_offset = 0;
1646 auio.uio_segflg = UIO_USERSPACE;
1647 auio.uio_rw = UIO_WRITE;
1648 auio.uio_td = td;
1650 tbytes = 0; /* avoid gcc warnings */
1651 error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1653 iovec_free(&iov, aiov);
1654 if (error)
1655 goto done;
1656 hdtr_size += tbytes; /* trailer bytes successfully sent */
1659 done:
1660 if (vp)
1661 vrele(vp);
1662 if (uap->sbytes != NULL) {
1663 sbytes += hdtr_size;
1664 copyout(&sbytes, uap->sbytes, sizeof(off_t));
1666 return (error);
1670 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1671 struct mbuf *mheader, off_t *sbytes, int flags)
1673 struct thread *td = curthread;
1674 struct vm_object *obj;
1675 struct socket *so;
1676 struct file *fp;
1677 struct mbuf *m, *mp;
1678 struct sf_buf *sf;
1679 struct vm_page *pg;
1680 off_t off, xfsize, xbytes;
1681 off_t hbytes = 0;
1682 int error = 0;
1684 if (vp->v_type != VREG) {
1685 error = EINVAL;
1686 goto done0;
1688 if ((obj = vp->v_object) == NULL) {
1689 error = EINVAL;
1690 goto done0;
1692 error = holdsock(td, sfd, &fp);
1693 if (error)
1694 goto done0;
1695 so = (struct socket *)fp->f_data;
1696 if (so->so_type != SOCK_STREAM) {
1697 error = EINVAL;
1698 goto done1;
1700 if ((so->so_state & SS_ISCONNECTED) == 0) {
1701 error = ENOTCONN;
1702 goto done1;
1704 if (offset < 0) {
1705 error = EINVAL;
1706 goto done1;
1710 * preallocation is required for asynchronous passing of mbufs,
1711 * otherwise we can wind up building up an infinite number of
1712 * mbufs during the asynchronous latency.
1714 if ((so->so_snd.ssb_flags & (SSB_PREALLOC | SSB_STOPSUPP)) == 0) {
1715 error = EINVAL;
1716 goto done1;
1719 *sbytes = 0;
1720 xbytes = 0;
1723 * Protect against multiple writers to the socket.
1724 * We need at least a shared lock on the VM object
1726 ssb_lock(&so->so_snd, M_WAITOK);
1727 vm_object_hold_shared(obj);
1730 * Loop through the pages in the file, starting with the requested
1731 * offset. Get a file page (do I/O if necessary), map the file page
1732 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1733 * it on the socket.
1735 for (off = offset; ;
1736 off += xfsize, *sbytes += xfsize + hbytes, xbytes += xfsize) {
1737 vm_pindex_t pindex;
1738 vm_offset_t pgoff;
1739 long space;
1740 int loops;
1742 pindex = OFF_TO_IDX(off);
1743 loops = 0;
1745 retry_lookup:
1747 * Calculate the amount to transfer. Not to exceed a page,
1748 * the EOF, or the passed in nbytes.
1750 xfsize = vp->v_filesize - off;
1751 if (xfsize > PAGE_SIZE)
1752 xfsize = PAGE_SIZE;
1753 pgoff = (vm_offset_t)(off & PAGE_MASK);
1754 if (PAGE_SIZE - pgoff < xfsize)
1755 xfsize = PAGE_SIZE - pgoff;
1756 if (nbytes && xfsize > (nbytes - xbytes))
1757 xfsize = nbytes - xbytes;
1758 if (xfsize <= 0)
1759 break;
1761 * Optimize the non-blocking case by looking at the socket space
1762 * before going to the extra work of constituting the sf_buf.
1764 if (so->so_snd.ssb_flags & SSB_PREALLOC)
1765 space = ssb_space_prealloc(&so->so_snd);
1766 else
1767 space = ssb_space(&so->so_snd);
1769 if ((fp->f_flag & FNONBLOCK) && space <= 0) {
1770 if (so->so_state & SS_CANTSENDMORE)
1771 error = EPIPE;
1772 else
1773 error = EAGAIN;
1774 goto done;
1778 * Attempt to look up the page.
1780 * Try to find the data using a shared vm_object token and
1781 * vm_page_lookup_sbusy_try() first.
1783 * If data is missing, use a UIO_NOCOPY VOP_READ to load
1784 * the missing data and loop back up. We avoid all sorts
1785 * of problems by not trying to hold onto the page during
1786 * the I/O.
1788 * NOTE: The soft-busy will temporarily block filesystem
1789 * truncation operations when a file is removed
1790 * while the sendfile is running.
1792 pg = vm_page_lookup_sbusy_try(obj, pindex, pgoff, xfsize);
1793 if (pg == NULL) {
1794 struct uio auio;
1795 struct iovec aiov;
1796 int bsize;
1798 if (++loops > 100000) {
1799 kprintf("sendfile: VOP operation failed "
1800 "to retain page\n");
1801 error = EIO;
1802 goto done;
1805 vm_object_drop(obj);
1806 bsize = vp->v_mount->mnt_stat.f_iosize;
1807 auio.uio_iov = &aiov;
1808 auio.uio_iovcnt = 1;
1809 aiov.iov_base = 0;
1810 aiov.iov_len = MAXBSIZE;
1811 auio.uio_resid = MAXBSIZE;
1812 auio.uio_offset = trunc_page(off);
1813 auio.uio_segflg = UIO_NOCOPY;
1814 auio.uio_rw = UIO_READ;
1815 auio.uio_td = td;
1817 vn_lock(vp, LK_SHARED | LK_RETRY);
1818 error = VOP_READ_FP(vp, &auio,
1819 IO_VMIO | ((MAXBSIZE / bsize) << 16),
1820 td->td_ucred, fp);
1821 vn_unlock(vp);
1822 vm_object_hold_shared(obj);
1824 if (error)
1825 goto done;
1826 goto retry_lookup;
1830 * Get a sendfile buf. We usually wait as long as necessary,
1831 * but this wait can be interrupted.
1833 if ((sf = sf_buf_alloc(pg)) == NULL) {
1834 vm_page_sbusy_drop(pg);
1835 /* vm_page_try_to_free(pg); */
1836 error = EINTR;
1837 goto done;
1841 * Get an mbuf header and set it up as having external storage.
1843 MGETHDR(m, M_WAITOK, MT_DATA);
1844 if (m == NULL) {
1845 error = ENOBUFS;
1846 vm_page_sbusy_drop(pg);
1847 /* vm_page_try_to_free(pg); */
1848 sf_buf_free(sf);
1849 goto done;
1852 m->m_ext.ext_free = sf_buf_mfree;
1853 m->m_ext.ext_ref = sf_buf_ref;
1854 m->m_ext.ext_arg = sf;
1855 m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
1856 m->m_ext.ext_size = PAGE_SIZE;
1857 m->m_data = (char *)sf_buf_kva(sf) + pgoff;
1858 m->m_flags |= M_EXT;
1859 m->m_pkthdr.len = m->m_len = xfsize;
1860 KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
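/*
 * If a header mbuf was supplied, prepend it to the first file-data
 * mbuf; its length is tracked in hbytes so *sbytes accounts for the
 * header bytes as well.
 */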
1862 if (mheader != NULL) {
1863 hbytes = mheader->m_pkthdr.len;
1864 mheader->m_pkthdr.len += m->m_pkthdr.len;
1865 m_cat(mheader, m);
1866 m = mheader;
1867 mheader = NULL;
1868 } else {
1869 hbytes = 0;
1873 * Add the buffer to the socket buffer chain.
1875 crit_enter();
1876 retry_space:
1878 * Make sure that the socket is still able to take more data.
1879 * CANTSENDMORE being true usually means that the connection
1880 * was closed. so_error is true when an error was sensed after
1881 * a previous send.
1882 * The state is checked after the page mapping and buffer
1883 * allocation above since those operations may block and make
1884 * any socket checks stale. From this point forward, nothing
1885 * blocks before the pru_send (or more accurately, any blocking
1886 * results in a loop back to here to re-check).
1888 if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1889 if (so->so_state & SS_CANTSENDMORE) {
1890 error = EPIPE;
1891 } else {
1892 error = so->so_error;
1893 so->so_error = 0;
1895 m_freem(m);
1896 crit_exit();
1897 goto done;
1900 * Wait for socket space to become available. We do this just
1901 * after checking the connection state above in order to avoid
1902 * a race condition with ssb_wait().
1904 if (so->so_snd.ssb_flags & SSB_PREALLOC)
1905 space = ssb_space_prealloc(&so->so_snd);
1906 else
1907 space = ssb_space(&so->so_snd);
1909 if (space < m->m_pkthdr.len && space < so->so_snd.ssb_lowat) {
1910 if (fp->f_flag & FNONBLOCK) {
1911 m_freem(m);
1912 crit_exit();
1913 error = EAGAIN;
1914 goto done;
1916 error = ssb_wait(&so->so_snd);
1918 * An error from ssb_wait usually indicates that we've
1919 * been interrupted by a signal. If we've sent anything
1920 * then return bytes sent, otherwise return the error.
1922 if (error) {
1923 m_freem(m);
1924 crit_exit();
1925 goto done;
1927 goto retry_space;
1930 if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1931 for (mp = m; mp != NULL; mp = mp->m_next)
1932 ssb_preallocstream(&so->so_snd, mp);
1934 if (use_sendfile_async)
1935 error = so_pru_senda(so, 0, m, NULL, NULL, td);
1936 else
1937 error = so_pru_send(so, 0, m, NULL, NULL, td);
1939 crit_exit();
1940 if (error)
1941 goto done;
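/*
 * If the header mbuf was never consumed by the page loop (e.g. the
 * file had no data to send), push it out on its own now.
 */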
1943 if (mheader != NULL) {
1944 *sbytes += mheader->m_pkthdr.len;
1946 if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1947 for (mp = mheader; mp != NULL; mp = mp->m_next)
1948 ssb_preallocstream(&so->so_snd, mp);
1950 if (use_sendfile_async)
1951 error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
1952 else
1953 error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1955 mheader = NULL;
1957 done:
1958 vm_object_drop(obj);
1959 ssb_unlock(&so->so_snd);
1960 done1:
1961 dropfp(td, sfd, fp);
1962 done0:
1963 if (mheader != NULL)
1964 m_freem(mheader);
1965 return (error);