socket: Allow root to pass large buffer to getsockopt.
[dragonfly.git] / sys / kern / uipc_syscalls.c
blob 07452b2f07898ea5ac7afedfcadd0553fa5d7156
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
33 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
36 #include "opt_ktrace.h"
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/sysproto.h>
42 #include <sys/malloc.h>
43 #include <sys/filedesc.h>
44 #include <sys/event.h>
45 #include <sys/proc.h>
46 #include <sys/fcntl.h>
47 #include <sys/file.h>
48 #include <sys/filio.h>
49 #include <sys/kern_syscall.h>
50 #include <sys/mbuf.h>
51 #include <sys/protosw.h>
52 #include <sys/sfbuf.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/socketops.h>
56 #include <sys/uio.h>
57 #include <sys/vnode.h>
58 #include <sys/lock.h>
59 #include <sys/mount.h>
60 #ifdef KTRACE
61 #include <sys/ktrace.h>
62 #endif
63 #include <vm/vm.h>
64 #include <vm/vm_object.h>
65 #include <vm/vm_page.h>
66 #include <vm/vm_pageout.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_extern.h>
69 #include <sys/file2.h>
70 #include <sys/signalvar.h>
71 #include <sys/serialize.h>
73 #include <sys/thread2.h>
74 #include <sys/msgport2.h>
75 #include <sys/socketvar2.h>
76 #include <net/netmsg2.h>
77 #include <vm/vm_page2.h>
79 extern int use_soaccept_pred_fast;
80 extern int use_sendfile_async;
81 extern int use_soconnect_async;
84 * System call interface to the socket abstraction.
87 extern struct fileops socketops;
90 * socket_args(int domain, int type, int protocol)
92 int
93 kern_socket(int domain, int type, int protocol, int *res)
95 struct thread *td = curthread;
96 struct filedesc *fdp = td->td_proc->p_fd;
97 struct socket *so;
98 struct file *fp;
99 int fd, error;
100 u_int fflags = 0;
101 int oflags = 0;
103 KKASSERT(td->td_lwp);
105 if (type & SOCK_NONBLOCK) {
106 type &= ~SOCK_NONBLOCK;
107 fflags |= FNONBLOCK;
109 if (type & SOCK_CLOEXEC) {
110 type &= ~SOCK_CLOEXEC;
111 oflags |= O_CLOEXEC;
114 error = falloc(td->td_lwp, &fp, &fd);
115 if (error)
116 return (error);
117 error = socreate(domain, &so, type, protocol, td);
118 if (error) {
119 fsetfd(fdp, NULL, fd);
120 } else {
121 fp->f_type = DTYPE_SOCKET;
122 fp->f_flag = FREAD | FWRITE | fflags;
123 fp->f_ops = &socketops;
124 fp->f_data = so;
125 if (oflags & O_CLOEXEC)
126 fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
127 *res = fd;
128 fsetfd(fdp, fp, fd);
130 fdrop(fp);
131 return (error);
135 * MPALMOSTSAFE
138 sys_socket(struct socket_args *uap)
140 int error;
142 error = kern_socket(uap->domain, uap->type, uap->protocol,
143 &uap->sysmsg_iresult);
145 return (error);
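/*
 * Illustrative userland sketch, not part of the kernel source: creating a
 * socket with the SOCK_NONBLOCK/SOCK_CLOEXEC type flags that kern_socket()
 * above strips off and maps onto FNONBLOCK and UF_EXCLOSE.  The helper name
 * is hypothetical.
 */
#include <sys/socket.h>

int
example_socket_nb_cloexec(void)
{
	int s;

	s = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
	return (s);		/* -1 on failure, with errno set */
}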
149 kern_bind(int s, struct sockaddr *sa)
151 struct thread *td = curthread;
152 struct proc *p = td->td_proc;
153 struct file *fp;
154 int error;
156 KKASSERT(p);
157 error = holdsock(p->p_fd, s, &fp);
158 if (error)
159 return (error);
160 error = sobind((struct socket *)fp->f_data, sa, td);
161 fdrop(fp);
162 return (error);
166 * bind_args(int s, caddr_t name, int namelen)
168 * MPALMOSTSAFE
171 sys_bind(struct bind_args *uap)
173 struct sockaddr *sa;
174 int error;
176 error = getsockaddr(&sa, uap->name, uap->namelen);
177 if (error)
178 return (error);
179 error = kern_bind(uap->s, sa);
180 kfree(sa, M_SONAME);
182 return (error);
186 kern_listen(int s, int backlog)
188 struct thread *td = curthread;
189 struct proc *p = td->td_proc;
190 struct file *fp;
191 int error;
193 KKASSERT(p);
194 error = holdsock(p->p_fd, s, &fp);
195 if (error)
196 return (error);
197 error = solisten((struct socket *)fp->f_data, backlog, td);
198 fdrop(fp);
199 return(error);
203 * listen_args(int s, int backlog)
205 * MPALMOSTSAFE
208 sys_listen(struct listen_args *uap)
210 int error;
212 error = kern_listen(uap->s, uap->backlog);
213 return (error);
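/*
 * Illustrative userland sketch, not part of the kernel source: the
 * bind(2)/listen(2) sequence serviced by kern_bind() and kern_listen()
 * above.  The helper name is hypothetical.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

int
example_bind_listen(int s, unsigned short port)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);	/* BSD sockaddrs carry a length */
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);

	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		return (-1);
	if (listen(s, 128) < 0)
		return (-1);
	return (0);
}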
217 * Returns the accepted socket as well.
219 * NOTE! The sockets sitting on so_comp/so_incomp might have 0 refs, the
220 * pool token is absolutely required to avoid a sofree() race,
221 * as well as to avoid tailq handling races.
223 static boolean_t
224 soaccept_predicate(struct netmsg_so_notify *msg)
226 struct socket *head = msg->base.nm_so;
227 struct socket *so;
229 if (head->so_error != 0) {
230 msg->base.lmsg.ms_error = head->so_error;
231 return (TRUE);
233 lwkt_getpooltoken(head);
234 if (!TAILQ_EMPTY(&head->so_comp)) {
235 /* Abuse nm_so field as copy in/copy out parameter. XXX JH */
236 so = TAILQ_FIRST(&head->so_comp);
237 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);
238 TAILQ_REMOVE(&head->so_comp, so, so_list);
239 head->so_qlen--;
240 soclrstate(so, SS_COMP);
243 * Keep a reference before clearing the so_head
244 * to avoid racing socket close in netisr.
246 soreference(so);
247 so->so_head = NULL;
249 lwkt_relpooltoken(head);
251 msg->base.lmsg.ms_error = 0;
252 msg->base.nm_so = so;
253 return (TRUE);
255 lwkt_relpooltoken(head);
256 if (head->so_state & SS_CANTRCVMORE) {
257 msg->base.lmsg.ms_error = ECONNABORTED;
258 return (TRUE);
260 if (msg->nm_fflags & FNONBLOCK) {
261 msg->base.lmsg.ms_error = EWOULDBLOCK;
262 return (TRUE);
265 return (FALSE);
269 * The second argument to kern_accept() is a handle to a struct sockaddr.
270 * This allows kern_accept() to return a pointer to an allocated struct
271 * sockaddr which must be freed later with kfree(). The caller must
272 * initialize *name to NULL.
275 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res,
276 int sockflags)
278 struct thread *td = curthread;
279 struct filedesc *fdp = td->td_proc->p_fd;
280 struct file *lfp = NULL;
281 struct file *nfp = NULL;
282 struct sockaddr *sa;
283 struct socket *head, *so;
284 struct netmsg_so_notify msg;
285 int fd;
286 u_int fflag; /* type must match fp->f_flag */
287 int error, tmp;
289 *res = -1;
290 if (name && namelen && *namelen < 0)
291 return (EINVAL);
293 error = holdsock(td->td_proc->p_fd, s, &lfp);
294 if (error)
295 return (error);
297 error = falloc(td->td_lwp, &nfp, &fd);
298 if (error) { /* Probably ran out of file descriptors. */
299 fdrop(lfp);
300 return (error);
302 head = (struct socket *)lfp->f_data;
303 if ((head->so_options & SO_ACCEPTCONN) == 0) {
304 error = EINVAL;
305 goto done;
308 if (fflags & O_FBLOCKING)
309 fflags |= lfp->f_flag & ~FNONBLOCK;
310 else if (fflags & O_FNONBLOCKING)
311 fflags |= lfp->f_flag | FNONBLOCK;
312 else
313 fflags = lfp->f_flag;
315 if (use_soaccept_pred_fast) {
316 boolean_t pred;
318 /* Initialize necessary parts for soaccept_predicate() */
319 netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
320 msg.nm_fflags = fflags;
322 lwkt_getpooltoken(head);
323 pred = soaccept_predicate(&msg);
324 lwkt_relpooltoken(head);
326 if (pred) {
327 error = msg.base.lmsg.ms_error;
328 if (error)
329 goto done;
330 else
331 goto accepted;
335 /* optimize for uniprocessor case later XXX JH */
336 netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
337 0, netmsg_so_notify, netmsg_so_notify_doabort);
338 msg.nm_predicate = soaccept_predicate;
339 msg.nm_fflags = fflags;
340 msg.nm_etype = NM_REVENT;
341 error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
342 if (error)
343 goto done;
345 accepted:
347 * At this point we have the connection that's ready to be accepted.
349 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
350 * to eat the ref and turn it into a descriptor.
352 so = msg.base.nm_so;
354 fflag = lfp->f_flag;
356 /* connection has been removed from the listen queue */
357 KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);
359 if (sockflags & SOCK_KERN_NOINHERIT) {
360 fflag &= ~(FASYNC | FNONBLOCK);
361 if (sockflags & SOCK_NONBLOCK)
362 fflag |= FNONBLOCK;
363 } else {
364 if (head->so_sigio != NULL)
365 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
368 nfp->f_type = DTYPE_SOCKET;
369 nfp->f_flag = fflag;
370 nfp->f_ops = &socketops;
371 nfp->f_data = so;
372 /* Sync socket async state with file flags */
373 tmp = fflag & FASYNC;
374 fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);
376 sa = NULL;
377 if (so->so_faddr != NULL) {
378 sa = so->so_faddr;
379 so->so_faddr = NULL;
381 soaccept_generic(so);
382 error = 0;
383 } else {
384 error = soaccept(so, &sa);
388 * Set the returned name and namelen as applicable. Set the returned
389 * namelen to 0 for older code which might ignore the return value
390 * from accept.
392 if (error == 0) {
393 if (sa && name && namelen) {
394 if (*namelen > sa->sa_len)
395 *namelen = sa->sa_len;
396 *name = sa;
397 } else {
398 if (sa)
399 kfree(sa, M_SONAME);
403 done:
405 * If an error occurred, clear the reserved descriptor, else associate
406 * nfp with it.
408 * Note that *res is normally ignored if an error is returned but
409 * a syscall message will still have access to the result code.
411 if (error) {
412 fsetfd(fdp, NULL, fd);
413 } else {
414 if (sockflags & SOCK_CLOEXEC)
415 fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
416 *res = fd;
417 fsetfd(fdp, nfp, fd);
419 fdrop(nfp);
420 fdrop(lfp);
421 return (error);
425 * accept(int s, caddr_t name, int *anamelen)
427 * MPALMOSTSAFE
430 sys_accept(struct accept_args *uap)
432 struct sockaddr *sa = NULL;
433 int sa_len;
434 int error;
436 if (uap->name) {
437 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
438 if (error)
439 return (error);
441 error = kern_accept(uap->s, 0, &sa, &sa_len,
442 &uap->sysmsg_iresult, 0);
444 if (error == 0)
445 error = copyout(sa, uap->name, sa_len);
446 if (error == 0) {
447 error = copyout(&sa_len, uap->anamelen,
448 sizeof(*uap->anamelen));
450 if (sa)
451 kfree(sa, M_SONAME);
452 } else {
453 error = kern_accept(uap->s, 0, NULL, 0,
454 &uap->sysmsg_iresult, 0);
456 return (error);
460 * extaccept(int s, int fflags, caddr_t name, int *anamelen)
462 * MPALMOSTSAFE
465 sys_extaccept(struct extaccept_args *uap)
467 struct sockaddr *sa = NULL;
468 int sa_len;
469 int error;
470 int fflags = uap->flags & O_FMASK;
472 if (uap->name) {
473 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
474 if (error)
475 return (error);
477 error = kern_accept(uap->s, fflags, &sa, &sa_len,
478 &uap->sysmsg_iresult, 0);
480 if (error == 0)
481 error = copyout(sa, uap->name, sa_len);
482 if (error == 0) {
483 error = copyout(&sa_len, uap->anamelen,
484 sizeof(*uap->anamelen));
486 if (sa)
487 kfree(sa, M_SONAME);
488 } else {
489 error = kern_accept(uap->s, fflags, NULL, 0,
490 &uap->sysmsg_iresult, 0);
492 return (error);
496 * accept4(int s, caddr_t name, int *anamelen, int flags)
498 * MPALMOSTSAFE
501 sys_accept4(struct accept4_args *uap)
503 struct sockaddr *sa = NULL;
504 int sa_len;
505 int error;
506 int sockflags;
508 if (uap->flags & ~(SOCK_NONBLOCK | SOCK_CLOEXEC))
509 return (EINVAL);
510 sockflags = uap->flags | SOCK_KERN_NOINHERIT;
512 if (uap->name) {
513 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
514 if (error)
515 return (error);
517 error = kern_accept(uap->s, 0, &sa, &sa_len,
518 &uap->sysmsg_iresult, sockflags);
520 if (error == 0)
521 error = copyout(sa, uap->name, sa_len);
522 if (error == 0) {
523 error = copyout(&sa_len, uap->anamelen,
524 sizeof(*uap->anamelen));
526 if (sa)
527 kfree(sa, M_SONAME);
528 } else {
529 error = kern_accept(uap->s, 0, NULL, 0,
530 &uap->sysmsg_iresult, sockflags);
532 return (error);
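/*
 * Illustrative userland sketch, not part of the kernel source: accept4(2)
 * with SOCK_NONBLOCK | SOCK_CLOEXEC, the only flags sys_accept4() accepts
 * (anything else fails with EINVAL).  The helper name is hypothetical.
 */
#include <sys/socket.h>
#include <errno.h>

int
example_accept_one(int lsock)
{
	struct sockaddr_storage ss;
	socklen_t sslen = sizeof(ss);
	int fd;

	fd = accept4(lsock, (struct sockaddr *)&ss, &sslen,
	    SOCK_NONBLOCK | SOCK_CLOEXEC);
	if (fd < 0 && errno == EWOULDBLOCK)
		return (-1);	/* nothing queued yet; try again later */
	return (fd);
}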
536 * Returns TRUE if predicate satisfied.
538 static boolean_t
539 soconnected_predicate(struct netmsg_so_notify *msg)
541 struct socket *so = msg->base.nm_so;
543 /* check predicate */
544 if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
545 msg->base.lmsg.ms_error = so->so_error;
546 return (TRUE);
549 return (FALSE);
553 kern_connect(int s, int fflags, struct sockaddr *sa)
555 struct thread *td = curthread;
556 struct proc *p = td->td_proc;
557 struct file *fp;
558 struct socket *so;
559 int error, interrupted = 0;
561 error = holdsock(p->p_fd, s, &fp);
562 if (error)
563 return (error);
564 so = (struct socket *)fp->f_data;
566 if (fflags & O_FBLOCKING)
567 /* fflags &= ~FNONBLOCK; */;
568 else if (fflags & O_FNONBLOCKING)
569 fflags |= FNONBLOCK;
570 else
571 fflags = fp->f_flag;
573 if (so->so_state & SS_ISCONNECTING) {
574 error = EALREADY;
575 goto done;
577 error = soconnect(so, sa, td, use_soconnect_async ? FALSE : TRUE);
578 if (error)
579 goto bad;
580 if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
581 error = EINPROGRESS;
582 goto done;
584 if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
585 struct netmsg_so_notify msg;
587 netmsg_init_abortable(&msg.base, so,
588 &curthread->td_msgport,
590 netmsg_so_notify,
591 netmsg_so_notify_doabort);
592 msg.nm_predicate = soconnected_predicate;
593 msg.nm_etype = NM_REVENT;
594 error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
595 if (error == EINTR || error == ERESTART)
596 interrupted = 1;
598 if (error == 0) {
599 error = so->so_error;
600 so->so_error = 0;
602 bad:
603 if (!interrupted)
604 soclrstate(so, SS_ISCONNECTING);
605 if (error == ERESTART)
606 error = EINTR;
607 done:
608 fdrop(fp);
609 return (error);
613 * connect_args(int s, caddr_t name, int namelen)
615 * MPALMOSTSAFE
618 sys_connect(struct connect_args *uap)
620 struct sockaddr *sa;
621 int error;
623 error = getsockaddr(&sa, uap->name, uap->namelen);
624 if (error)
625 return (error);
626 error = kern_connect(uap->s, 0, sa);
627 kfree(sa, M_SONAME);
629 return (error);
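/*
 * Illustrative userland sketch, not part of the kernel source: a
 * non-blocking connect.  kern_connect() above returns EINPROGRESS while the
 * socket is still in SS_ISCONNECTING; completion is then observed with
 * poll(2) and the final status read back via SO_ERROR.  The helper name is
 * hypothetical.
 */
#include <sys/socket.h>
#include <poll.h>
#include <errno.h>

int
example_connect_nonblock(int s, const struct sockaddr *sa, socklen_t salen)
{
	struct pollfd pfd;
	int soerr;
	socklen_t elen = sizeof(soerr);

	if (connect(s, sa, salen) == 0)
		return (0);			/* connected immediately */
	if (errno != EINPROGRESS)
		return (-1);

	pfd.fd = s;
	pfd.events = POLLOUT;
	if (poll(&pfd, 1, -1) < 0)
		return (-1);
	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &elen) < 0 || soerr != 0)
		return (-1);
	return (0);
}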
633 * connect_args(int s, int fflags, caddr_t name, int namelen)
635 * MPALMOSTSAFE
638 sys_extconnect(struct extconnect_args *uap)
640 struct sockaddr *sa;
641 int error;
642 int fflags = uap->flags & O_FMASK;
644 error = getsockaddr(&sa, uap->name, uap->namelen);
645 if (error)
646 return (error);
647 error = kern_connect(uap->s, fflags, sa);
648 kfree(sa, M_SONAME);
650 return (error);
654 kern_socketpair(int domain, int type, int protocol, int *sv)
656 struct thread *td = curthread;
657 struct filedesc *fdp;
658 struct file *fp1, *fp2;
659 struct socket *so1, *so2;
660 int fd1, fd2, error;
661 u_int fflags = 0;
662 int oflags = 0;
664 if (type & SOCK_NONBLOCK) {
665 type &= ~SOCK_NONBLOCK;
666 fflags |= FNONBLOCK;
668 if (type & SOCK_CLOEXEC) {
669 type &= ~SOCK_CLOEXEC;
670 oflags |= O_CLOEXEC;
673 fdp = td->td_proc->p_fd;
674 error = socreate(domain, &so1, type, protocol, td);
675 if (error)
676 return (error);
677 error = socreate(domain, &so2, type, protocol, td);
678 if (error)
679 goto free1;
680 error = falloc(td->td_lwp, &fp1, &fd1);
681 if (error)
682 goto free2;
683 sv[0] = fd1;
684 fp1->f_data = so1;
685 error = falloc(td->td_lwp, &fp2, &fd2);
686 if (error)
687 goto free3;
688 fp2->f_data = so2;
689 sv[1] = fd2;
690 error = soconnect2(so1, so2);
691 if (error)
692 goto free4;
693 if (type == SOCK_DGRAM) {
695 * Datagram socket connection is asymmetric.
697 error = soconnect2(so2, so1);
698 if (error)
699 goto free4;
701 fp1->f_type = fp2->f_type = DTYPE_SOCKET;
702 fp1->f_flag = fp2->f_flag = FREAD|FWRITE|fflags;
703 fp1->f_ops = fp2->f_ops = &socketops;
704 if (oflags & O_CLOEXEC) {
705 fdp->fd_files[fd1].fileflags |= UF_EXCLOSE;
706 fdp->fd_files[fd2].fileflags |= UF_EXCLOSE;
708 fsetfd(fdp, fp1, fd1);
709 fsetfd(fdp, fp2, fd2);
710 fdrop(fp1);
711 fdrop(fp2);
712 return (error);
713 free4:
714 fsetfd(fdp, NULL, fd2);
715 fdrop(fp2);
716 free3:
717 fsetfd(fdp, NULL, fd1);
718 fdrop(fp1);
719 free2:
720 (void)soclose(so2, 0);
721 free1:
722 (void)soclose(so1, 0);
723 return (error);
727 * socketpair(int domain, int type, int protocol, int *rsv)
730 sys_socketpair(struct socketpair_args *uap)
732 int error, sockv[2];
734 error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
736 if (error == 0) {
737 error = copyout(sockv, uap->rsv, sizeof(sockv));
739 if (error != 0) {
740 kern_close(sockv[0]);
741 kern_close(sockv[1]);
745 return (error);
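/*
 * Illustrative userland sketch, not part of the kernel source: a
 * close-on-exec UNIX-domain pair as built by kern_socketpair() above, where
 * the two sockets are cross-connected with soconnect2().  The helper name is
 * hypothetical.
 */
#include <sys/socket.h>

int
example_socketpair(int sv[2])
{
	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sv) < 0)
		return (-1);
	/* sv[0] and sv[1] are now a connected, bidirectional pair. */
	return (0);
}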
749 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
750 struct mbuf *control, int flags, size_t *res)
752 struct thread *td = curthread;
753 struct lwp *lp = td->td_lwp;
754 struct proc *p = td->td_proc;
755 struct file *fp;
756 size_t len;
757 int error;
758 struct socket *so;
759 #ifdef KTRACE
760 struct iovec *ktriov = NULL;
761 struct uio ktruio;
762 #endif
764 error = holdsock(p->p_fd, s, &fp);
765 if (error)
766 return (error);
767 #ifdef KTRACE
768 if (KTRPOINT(td, KTR_GENIO)) {
769 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
771 ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
772 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
773 ktruio = *auio;
775 #endif
776 len = auio->uio_resid;
777 so = (struct socket *)fp->f_data;
778 if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
779 if (fp->f_flag & FNONBLOCK)
780 flags |= MSG_FNONBLOCKING;
782 error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
783 if (error) {
784 if (auio->uio_resid != len && (error == ERESTART ||
785 error == EINTR || error == EWOULDBLOCK))
786 error = 0;
787 if (error == EPIPE && !(flags & MSG_NOSIGNAL) &&
788 !(so->so_options & SO_NOSIGPIPE))
789 lwpsignal(p, lp, SIGPIPE);
791 #ifdef KTRACE
792 if (ktriov != NULL) {
793 if (error == 0) {
794 ktruio.uio_iov = ktriov;
795 ktruio.uio_resid = len - auio->uio_resid;
796 ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
798 kfree(ktriov, M_TEMP);
800 #endif
801 if (error == 0)
802 *res = len - auio->uio_resid;
803 fdrop(fp);
804 return (error);
808 * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
810 * MPALMOSTSAFE
813 sys_sendto(struct sendto_args *uap)
815 struct thread *td = curthread;
816 struct uio auio;
817 struct iovec aiov;
818 struct sockaddr *sa = NULL;
819 int error;
821 if (uap->to) {
822 error = getsockaddr(&sa, uap->to, uap->tolen);
823 if (error)
824 return (error);
826 aiov.iov_base = uap->buf;
827 aiov.iov_len = uap->len;
828 auio.uio_iov = &aiov;
829 auio.uio_iovcnt = 1;
830 auio.uio_offset = 0;
831 auio.uio_resid = uap->len;
832 auio.uio_segflg = UIO_USERSPACE;
833 auio.uio_rw = UIO_WRITE;
834 auio.uio_td = td;
836 error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
837 &uap->sysmsg_szresult);
839 if (sa)
840 kfree(sa, M_SONAME);
841 return (error);
845 * sendmsg_args(int s, caddr_t msg, int flags)
847 * MPALMOSTSAFE
850 sys_sendmsg(struct sendmsg_args *uap)
852 struct thread *td = curthread;
853 struct msghdr msg;
854 struct uio auio;
855 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
856 struct sockaddr *sa = NULL;
857 struct mbuf *control = NULL;
858 int error;
860 error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
861 if (error)
862 return (error);
865 * Conditionally copyin msg.msg_name.
867 if (msg.msg_name) {
868 error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
869 if (error)
870 return (error);
874 * Populate auio.
876 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
877 &auio.uio_resid);
878 if (error)
879 goto cleanup2;
880 auio.uio_iov = iov;
881 auio.uio_iovcnt = msg.msg_iovlen;
882 auio.uio_offset = 0;
883 auio.uio_segflg = UIO_USERSPACE;
884 auio.uio_rw = UIO_WRITE;
885 auio.uio_td = td;
888 * Conditionally copyin msg.msg_control.
890 if (msg.msg_control) {
891 if (msg.msg_controllen < sizeof(struct cmsghdr) ||
892 msg.msg_controllen > MLEN) {
893 error = EINVAL;
894 goto cleanup;
896 control = m_get(M_WAITOK, MT_CONTROL);
897 if (control == NULL) {
898 error = ENOBUFS;
899 goto cleanup;
901 control->m_len = msg.msg_controllen;
902 error = copyin(msg.msg_control, mtod(control, caddr_t),
903 msg.msg_controllen);
904 if (error) {
905 m_free(control);
906 goto cleanup;
910 error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
911 &uap->sysmsg_szresult);
913 cleanup:
914 iovec_free(&iov, aiov);
915 cleanup2:
916 if (sa)
917 kfree(sa, M_SONAME);
918 return (error);
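/*
 * Illustrative userland sketch, not part of the kernel source: passing a
 * descriptor with sendmsg(2) and SCM_RIGHTS.  The copyin above rejects
 * msg_controllen larger than MLEN, so control data must fit in a single
 * small mbuf.  The helper name is hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

ssize_t
example_send_fd(int s, int fd_to_pass)
{
	struct msghdr msg;
	struct iovec iov;
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct cmsghdr *cm;
	char byte = 0;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &byte;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = CMSG_SPACE(sizeof(int));

	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_len = CMSG_LEN(sizeof(int));
	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));

	return (sendmsg(s, &msg, 0));
}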
922 * kern_recvmsg() takes a handle to sa and control. If the handle is non-
923 * null, it returns a dynamically allocated struct sockaddr and an mbuf.
924 * Don't forget to kfree() and m_freem() these if they are returned.
927 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
928 struct mbuf **control, int *flags, size_t *res)
930 struct thread *td = curthread;
931 struct proc *p = td->td_proc;
932 struct file *fp;
933 size_t len;
934 int error;
935 int lflags;
936 struct socket *so;
937 #ifdef KTRACE
938 struct iovec *ktriov = NULL;
939 struct uio ktruio;
940 #endif
942 error = holdsock(p->p_fd, s, &fp);
943 if (error)
944 return (error);
945 #ifdef KTRACE
946 if (KTRPOINT(td, KTR_GENIO)) {
947 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
949 ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
950 bcopy(auio->uio_iov, ktriov, iovlen);
951 ktruio = *auio;
953 #endif
954 len = auio->uio_resid;
955 so = (struct socket *)fp->f_data;
957 if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
958 if (fp->f_flag & FNONBLOCK) {
959 if (flags) {
960 *flags |= MSG_FNONBLOCKING;
961 } else {
962 lflags = MSG_FNONBLOCKING;
963 flags = &lflags;
968 error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
969 if (error) {
970 if (auio->uio_resid != len && (error == ERESTART ||
971 error == EINTR || error == EWOULDBLOCK))
972 error = 0;
974 #ifdef KTRACE
975 if (ktriov != NULL) {
976 if (error == 0) {
977 ktruio.uio_iov = ktriov;
978 ktruio.uio_resid = len - auio->uio_resid;
979 ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
981 kfree(ktriov, M_TEMP);
983 #endif
984 if (error == 0)
985 *res = len - auio->uio_resid;
986 fdrop(fp);
987 return (error);
991 * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
992 * caddr_t from, int *fromlenaddr)
994 * MPALMOSTSAFE
997 sys_recvfrom(struct recvfrom_args *uap)
999 struct thread *td = curthread;
1000 struct uio auio;
1001 struct iovec aiov;
1002 struct sockaddr *sa = NULL;
1003 int error, fromlen;
1005 if (uap->from && uap->fromlenaddr) {
1006 error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
1007 if (error)
1008 return (error);
1009 if (fromlen < 0)
1010 return (EINVAL);
1011 } else {
1012 fromlen = 0;
1014 aiov.iov_base = uap->buf;
1015 aiov.iov_len = uap->len;
1016 auio.uio_iov = &aiov;
1017 auio.uio_iovcnt = 1;
1018 auio.uio_offset = 0;
1019 auio.uio_resid = uap->len;
1020 auio.uio_segflg = UIO_USERSPACE;
1021 auio.uio_rw = UIO_READ;
1022 auio.uio_td = td;
1024 error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
1025 &uap->flags, &uap->sysmsg_szresult);
1027 if (error == 0 && uap->from) {
1028 /* note: sa may still be NULL */
1029 if (sa) {
1030 fromlen = MIN(fromlen, sa->sa_len);
1031 error = copyout(sa, uap->from, fromlen);
1032 } else {
1033 fromlen = 0;
1035 if (error == 0) {
1036 error = copyout(&fromlen, uap->fromlenaddr,
1037 sizeof(fromlen));
1040 if (sa)
1041 kfree(sa, M_SONAME);
1043 return (error);
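/*
 * Illustrative userland sketch, not part of the kernel source: a datagram
 * echo using recvfrom(2) and sendto(2), the userland counterparts of the
 * kern_recvmsg()/kern_sendmsg() paths above.  The helper name is
 * hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>

ssize_t
example_udp_echo_once(int s)
{
	struct sockaddr_storage from;
	socklen_t fromlen = sizeof(from);
	char buf[2048];
	ssize_t n;

	n = recvfrom(s, buf, sizeof(buf), 0,
	    (struct sockaddr *)&from, &fromlen);
	if (n < 0)
		return (-1);
	/* Echo the datagram back to its source address. */
	return (sendto(s, buf, (size_t)n, 0,
	    (struct sockaddr *)&from, fromlen));
}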
1047 * recvmsg_args(int s, struct msghdr *msg, int flags)
1049 * MPALMOSTSAFE
1052 sys_recvmsg(struct recvmsg_args *uap)
1054 struct thread *td = curthread;
1055 struct msghdr msg;
1056 struct uio auio;
1057 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1058 struct mbuf *m, *control = NULL;
1059 struct sockaddr *sa = NULL;
1060 caddr_t ctlbuf;
1061 socklen_t *ufromlenp, *ucontrollenp;
1062 int error, fromlen, controllen, len, flags, *uflagsp;
1065 * This copyin handles everything except the iovec.
1067 error = copyin(uap->msg, &msg, sizeof(msg));
1068 if (error)
1069 return (error);
1071 if (msg.msg_name && msg.msg_namelen < 0)
1072 return (EINVAL);
1073 if (msg.msg_control && msg.msg_controllen < 0)
1074 return (EINVAL);
1076 ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1077 msg_namelen));
1078 ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1079 msg_controllen));
1080 uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
1081 msg_flags));
1084 * Populate auio.
1086 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
1087 &auio.uio_resid);
1088 if (error)
1089 return (error);
1090 auio.uio_iov = iov;
1091 auio.uio_iovcnt = msg.msg_iovlen;
1092 auio.uio_offset = 0;
1093 auio.uio_segflg = UIO_USERSPACE;
1094 auio.uio_rw = UIO_READ;
1095 auio.uio_td = td;
1097 flags = uap->flags;
1099 error = kern_recvmsg(uap->s,
1100 (msg.msg_name ? &sa : NULL), &auio,
1101 (msg.msg_control ? &control : NULL), &flags,
1102 &uap->sysmsg_szresult);
1105 * Conditionally copyout the name and populate the namelen field.
1107 if (error == 0 && msg.msg_name) {
1108 /* note: sa may still be NULL */
1109 if (sa != NULL) {
1110 fromlen = MIN(msg.msg_namelen, sa->sa_len);
1111 error = copyout(sa, msg.msg_name, fromlen);
1112 } else {
1113 fromlen = 0;
1115 if (error == 0)
1116 error = copyout(&fromlen, ufromlenp,
1117 sizeof(*ufromlenp));
1121 * Copyout msg.msg_control and msg.msg_controllen.
1123 if (error == 0 && msg.msg_control) {
1124 len = msg.msg_controllen;
1125 m = control;
1126 ctlbuf = (caddr_t)msg.msg_control;
1128 while(m && len > 0) {
1129 unsigned int tocopy;
1131 if (len >= m->m_len) {
1132 tocopy = m->m_len;
1133 } else {
1134 msg.msg_flags |= MSG_CTRUNC;
1135 tocopy = len;
1138 error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1139 if (error)
1140 goto cleanup;
1142 ctlbuf += tocopy;
1143 len -= tocopy;
1144 m = m->m_next;
1146 controllen = ctlbuf - (caddr_t)msg.msg_control;
1147 error = copyout(&controllen, ucontrollenp,
1148 sizeof(*ucontrollenp));
1151 if (error == 0)
1152 error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1154 cleanup:
1155 if (sa)
1156 kfree(sa, M_SONAME);
1157 iovec_free(&iov, aiov);
1158 if (control)
1159 m_freem(control);
1160 return (error);
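/*
 * Illustrative userland sketch, not part of the kernel source: receiving a
 * descriptor passed with SCM_RIGHTS.  MSG_CTRUNC in msg_flags (set by the
 * control copyout loop above when the user buffer is too small) indicates
 * truncated control data.  The helper name is hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

int
example_recv_fd(int s)
{
	struct msghdr msg;
	struct iovec iov;
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct cmsghdr *cm;
	char byte;
	int fd = -1;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &byte;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);

	if (recvmsg(s, &msg, 0) < 0)
		return (-1);
	if (msg.msg_flags & MSG_CTRUNC)
		return (-1);		/* control data was truncated */
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS)
			memcpy(&fd, CMSG_DATA(cm), sizeof(int));
	}
	return (fd);		/* -1 if no descriptor arrived */
}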
1164 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1165 * in-kernel pointer instead of a userland pointer. This allows us
1166 * to manipulate socket options in the emulation code.
1169 kern_setsockopt(int s, struct sockopt *sopt)
1171 struct thread *td = curthread;
1172 struct proc *p = td->td_proc;
1173 struct file *fp;
1174 int error;
1176 if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1177 return (EFAULT);
1178 if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1179 return (EINVAL);
1180 if (sopt->sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1181 return (EINVAL);
1183 error = holdsock(p->p_fd, s, &fp);
1184 if (error)
1185 return (error);
1187 error = sosetopt((struct socket *)fp->f_data, sopt);
1188 fdrop(fp);
1189 return (error);
1193 * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1195 * MPALMOSTSAFE
1198 sys_setsockopt(struct setsockopt_args *uap)
1200 struct thread *td = curthread;
1201 struct sockopt sopt;
1202 int error;
1204 sopt.sopt_level = uap->level;
1205 sopt.sopt_name = uap->name;
1206 sopt.sopt_valsize = uap->valsize;
1207 sopt.sopt_td = td;
1208 sopt.sopt_val = NULL;
1210 if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1211 return (EINVAL);
1212 if (uap->val) {
1213 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1214 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1215 if (error)
1216 goto out;
1219 error = kern_setsockopt(uap->s, &sopt);
1220 out:
1221 if (uap->val)
1222 kfree(sopt.sopt_val, M_TEMP);
1223 return(error);
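/*
 * Illustrative userland sketch, not part of the kernel source: a typical
 * setsockopt(2) call.  The value is bounced through a temporary kernel
 * buffer (capped at SOMAXOPT_SIZE) before kern_setsockopt()/sosetopt()
 * see it.  The helper name is hypothetical.
 */
#include <sys/socket.h>

int
example_set_reuseaddr(int s)
{
	int on = 1;

	return (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)));
}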
1227 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1228 * in-kernel pointer instead of a userland pointer. This allows us
1229 * to manipulate socket options in the emulation code.
1232 kern_getsockopt(int s, struct sockopt *sopt)
1234 struct thread *td = curthread;
1235 struct proc *p = td->td_proc;
1236 struct file *fp;
1237 int error;
1239 if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1240 return (EFAULT);
1241 if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1242 return (EINVAL);
1244 error = holdsock(p->p_fd, s, &fp);
1245 if (error)
1246 return (error);
1248 error = sogetopt((struct socket *)fp->f_data, sopt);
1249 fdrop(fp);
1250 return (error);
1254 * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1256 * MPALMOSTSAFE
1259 sys_getsockopt(struct getsockopt_args *uap)
1261 struct thread *td = curthread;
1262 struct sockopt sopt;
1263 int error, valsize, valszmax, mflag = 0;
1265 if (uap->val) {
1266 error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1267 if (error)
1268 return (error);
1269 } else {
1270 valsize = 0;
1273 sopt.sopt_level = uap->level;
1274 sopt.sopt_name = uap->name;
1275 sopt.sopt_valsize = valsize;
1276 sopt.sopt_td = td;
1277 sopt.sopt_val = NULL;
1279 if (td->td_proc->p_ucred->cr_uid == 0) {
1280 valszmax = SOMAXOPT_SIZE0;
1281 mflag = M_NULLOK;
1282 } else {
1283 valszmax = SOMAXOPT_SIZE;
1285 if (sopt.sopt_valsize > valszmax) /* unsigned */
1286 return (EINVAL);
1287 if (uap->val) {
1288 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP,
1289 M_WAITOK | mflag);
1290 if (sopt.sopt_val == NULL)
1291 return (ENOBUFS);
1292 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1293 if (error)
1294 goto out;
1297 error = kern_getsockopt(uap->s, &sopt);
1298 if (error)
1299 goto out;
1300 valsize = sopt.sopt_valsize;
1301 error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1302 if (error)
1303 goto out;
1304 if (uap->val)
1305 error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1306 out:
1307 if (uap->val)
1308 kfree(sopt.sopt_val, M_TEMP);
1309 return (error);
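/*
 * Illustrative userland sketch, not part of the kernel source: reading an
 * option back with getsockopt(2).  As the commit message notes, a root
 * caller may supply a buffer up to SOMAXOPT_SIZE0, while unprivileged
 * callers remain limited to SOMAXOPT_SIZE.  The helper name is
 * hypothetical.
 */
#include <sys/socket.h>

int
example_get_sndbuf(int s, int *sndbuf)
{
	socklen_t len = sizeof(*sndbuf);

	return (getsockopt(s, SOL_SOCKET, SO_SNDBUF, sndbuf, &len));
}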
1313 * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1314 * This allows kern_getsockname() to return a pointer to an allocated struct
1315 * sockaddr which must be freed later with kfree(). The caller must
1316 * initialize *name to NULL.
1319 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1321 struct thread *td = curthread;
1322 struct proc *p = td->td_proc;
1323 struct file *fp;
1324 struct socket *so;
1325 struct sockaddr *sa = NULL;
1326 int error;
1328 error = holdsock(p->p_fd, s, &fp);
1329 if (error)
1330 return (error);
1331 if (*namelen < 0) {
1332 fdrop(fp);
1333 return (EINVAL);
1335 so = (struct socket *)fp->f_data;
1336 error = so_pru_sockaddr(so, &sa);
1337 if (error == 0) {
1338 if (sa == NULL) {
1339 *namelen = 0;
1340 } else {
1341 *namelen = MIN(*namelen, sa->sa_len);
1342 *name = sa;
1346 fdrop(fp);
1347 return (error);
1351 * getsockname_args(int fdes, caddr_t asa, int *alen)
1353 * Get socket name.
1355 * MPALMOSTSAFE
1358 sys_getsockname(struct getsockname_args *uap)
1360 struct sockaddr *sa = NULL;
1361 int error, sa_len;
1363 error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1364 if (error)
1365 return (error);
1367 error = kern_getsockname(uap->fdes, &sa, &sa_len);
1369 if (error == 0)
1370 error = copyout(sa, uap->asa, sa_len);
1371 if (error == 0)
1372 error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1373 if (sa)
1374 kfree(sa, M_SONAME);
1375 return (error);
1379 * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1380 * This allows kern_getpeername() to return a pointer to an allocated struct
1381 * sockaddr which must be freed later with kfree(). The caller must
1382 * initialize *name to NULL.
1385 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1387 struct thread *td = curthread;
1388 struct proc *p = td->td_proc;
1389 struct file *fp;
1390 struct socket *so;
1391 struct sockaddr *sa = NULL;
1392 int error;
1394 error = holdsock(p->p_fd, s, &fp);
1395 if (error)
1396 return (error);
1397 if (*namelen < 0) {
1398 fdrop(fp);
1399 return (EINVAL);
1401 so = (struct socket *)fp->f_data;
1402 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1403 fdrop(fp);
1404 return (ENOTCONN);
1406 error = so_pru_peeraddr(so, &sa);
1407 if (error == 0) {
1408 if (sa == NULL) {
1409 *namelen = 0;
1410 } else {
1411 *namelen = MIN(*namelen, sa->sa_len);
1412 *name = sa;
1416 fdrop(fp);
1417 return (error);
1421 * getpeername_args(int fdes, caddr_t asa, int *alen)
1423 * Get name of peer for connected socket.
1425 * MPALMOSTSAFE
1428 sys_getpeername(struct getpeername_args *uap)
1430 struct sockaddr *sa = NULL;
1431 int error, sa_len;
1433 error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1434 if (error)
1435 return (error);
1437 error = kern_getpeername(uap->fdes, &sa, &sa_len);
1439 if (error == 0)
1440 error = copyout(sa, uap->asa, sa_len);
1441 if (error == 0)
1442 error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1443 if (sa)
1444 kfree(sa, M_SONAME);
1445 return (error);
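/*
 * Illustrative userland sketch, not part of the kernel source: querying the
 * local and remote addresses of a connected socket, the userland side of
 * kern_getsockname() and kern_getpeername() above.  The helper name is
 * hypothetical.
 */
#include <sys/socket.h>

int
example_query_names(int s, struct sockaddr_storage *local,
    struct sockaddr_storage *peer)
{
	socklen_t llen = sizeof(*local);
	socklen_t plen = sizeof(*peer);

	if (getsockname(s, (struct sockaddr *)local, &llen) < 0)
		return (-1);
	/* getpeername() fails with ENOTCONN on an unconnected socket. */
	if (getpeername(s, (struct sockaddr *)peer, &plen) < 0)
		return (-1);
	return (0);
}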
1449 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1451 struct sockaddr *sa;
1452 int error;
1454 *namp = NULL;
1455 if (len > SOCK_MAXADDRLEN)
1456 return ENAMETOOLONG;
1457 if (len < offsetof(struct sockaddr, sa_data[0]))
1458 return EDOM;
1459 sa = kmalloc(len, M_SONAME, M_WAITOK);
1460 error = copyin(uaddr, sa, len);
1461 if (error) {
1462 kfree(sa, M_SONAME);
1463 } else {
1464 #if BYTE_ORDER != BIG_ENDIAN
1466 * The bind(), connect(), and sendto() syscalls were not
1467 * versioned for COMPAT_43. Thus, this check must stay.
1469 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1470 sa->sa_family = sa->sa_len;
1471 #endif
1472 sa->sa_len = len;
1473 *namp = sa;
1475 return error;
1479 * Detach a mapped page and release resources back to the system.
1480 * We must release our wiring and if the object is ripped out
1481 * from under the vm_page we become responsible for freeing the
1482 * page.
1484 * MPSAFE
1486 static void
1487 sf_buf_mfree(void *arg)
1489 struct sf_buf *sf = arg;
1490 vm_page_t m;
1492 m = sf_buf_page(sf);
1493 if (sf_buf_free(sf)) {
1494 /* sf invalid now */
1496 vm_page_busy_wait(m, FALSE, "sockpgf");
1497 vm_page_wakeup(m);
1499 vm_page_unhold(m);
1500 #if 0
1501 if (m->object == NULL &&
1502 m->wire_count == 0 &&
1503 (m->flags & PG_NEED_COMMIT) == 0) {
1504 vm_page_free(m);
1505 } else {
1506 vm_page_wakeup(m);
1508 #endif
1513 * sendfile(2).
1514 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1515 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1517 * Send a file specified by 'fd' and starting at 'offset' to a socket
1518 * specified by 's'. Send only 'nbytes' of the file or until EOF if
1519 * nbytes == 0. Optionally add a header and/or trailer to the socket
1520 * output. If specified, write the total number of bytes sent into *sbytes.
1522 * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1523 * the headers to count against the remaining bytes to be sent from
1524 * the file descriptor. We may wish to implement a compatibility syscall
1525 * in the future.
1527 * MPALMOSTSAFE
1530 sys_sendfile(struct sendfile_args *uap)
1532 struct thread *td = curthread;
1533 struct proc *p = td->td_proc;
1534 struct file *fp;
1535 struct vnode *vp = NULL;
1536 struct sf_hdtr hdtr;
1537 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1538 struct uio auio;
1539 struct mbuf *mheader = NULL;
1540 size_t hbytes = 0;
1541 size_t tbytes;
1542 off_t hdtr_size = 0;
1543 off_t sbytes;
1544 int error;
1546 KKASSERT(p);
1549 * Do argument checking. Must be a regular file in, stream
1550 * type and connected socket out, positive offset.
1552 fp = holdfp(p->p_fd, uap->fd, FREAD);
1553 if (fp == NULL) {
1554 return (EBADF);
1556 if (fp->f_type != DTYPE_VNODE) {
1557 fdrop(fp);
1558 return (EINVAL);
1560 vp = (struct vnode *)fp->f_data;
1561 vref(vp);
1562 fdrop(fp);
1565 * If specified, get the pointer to the sf_hdtr struct for
1566 * any headers/trailers.
1568 if (uap->hdtr) {
1569 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1570 if (error)
1571 goto done;
1573 * Send any headers.
1575 if (hdtr.headers) {
1576 error = iovec_copyin(hdtr.headers, &iov, aiov,
1577 hdtr.hdr_cnt, &hbytes);
1578 if (error)
1579 goto done;
1580 auio.uio_iov = iov;
1581 auio.uio_iovcnt = hdtr.hdr_cnt;
1582 auio.uio_offset = 0;
1583 auio.uio_segflg = UIO_USERSPACE;
1584 auio.uio_rw = UIO_WRITE;
1585 auio.uio_td = td;
1586 auio.uio_resid = hbytes;
1588 mheader = m_uiomove(&auio);
1590 iovec_free(&iov, aiov);
1591 if (mheader == NULL)
1592 goto done;
1596 error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1597 &sbytes, uap->flags);
1598 if (error)
1599 goto done;
1602 * Send trailers. Wimp out and use writev(2).
1604 if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1605 error = iovec_copyin(hdtr.trailers, &iov, aiov,
1606 hdtr.trl_cnt, &auio.uio_resid);
1607 if (error)
1608 goto done;
1609 auio.uio_iov = iov;
1610 auio.uio_iovcnt = hdtr.trl_cnt;
1611 auio.uio_offset = 0;
1612 auio.uio_segflg = UIO_USERSPACE;
1613 auio.uio_rw = UIO_WRITE;
1614 auio.uio_td = td;
1616 tbytes = 0; /* avoid gcc warnings */
1617 error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1619 iovec_free(&iov, aiov);
1620 if (error)
1621 goto done;
1622 hdtr_size += tbytes; /* trailer bytes successfully sent */
1625 done:
1626 if (vp)
1627 vrele(vp);
1628 if (uap->sbytes != NULL) {
1629 sbytes += hdtr_size;
1630 copyout(&sbytes, uap->sbytes, sizeof(off_t));
1632 return (error);
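/*
 * Illustrative userland sketch, not part of the kernel source: sending a
 * whole file with a small header prepended via sf_hdtr, matching the
 * sendfile(2) prototype documented above.  nbytes == 0 means "until EOF";
 * *sbytes reports the total actually sent.  The helper name and header
 * string are hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

int
example_sendfile_with_header(int filefd, int sock, char *hdr)
{
	struct sf_hdtr hdtr;
	struct iovec hiov;
	off_t sbytes = 0;

	hiov.iov_base = hdr;
	hiov.iov_len = strlen(hdr);

	memset(&hdtr, 0, sizeof(hdtr));
	hdtr.headers = &hiov;
	hdtr.hdr_cnt = 1;

	if (sendfile(filefd, sock, 0, 0, &hdtr, &sbytes, 0) < 0)
		return (-1);
	return (0);
}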
1636 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1637 struct mbuf *mheader, off_t *sbytes, int flags)
1639 struct thread *td = curthread;
1640 struct proc *p = td->td_proc;
1641 struct vm_object *obj;
1642 struct socket *so;
1643 struct file *fp;
1644 struct mbuf *m, *mp;
1645 struct sf_buf *sf;
1646 struct vm_page *pg;
1647 off_t off, xfsize, xbytes;
1648 off_t hbytes = 0;
1649 int error = 0;
1651 if (vp->v_type != VREG) {
1652 error = EINVAL;
1653 goto done0;
1655 if ((obj = vp->v_object) == NULL) {
1656 error = EINVAL;
1657 goto done0;
1659 error = holdsock(p->p_fd, sfd, &fp);
1660 if (error)
1661 goto done0;
1662 so = (struct socket *)fp->f_data;
1663 if (so->so_type != SOCK_STREAM) {
1664 error = EINVAL;
1665 goto done;
1667 if ((so->so_state & SS_ISCONNECTED) == 0) {
1668 error = ENOTCONN;
1669 goto done;
1671 if (offset < 0) {
1672 error = EINVAL;
1673 goto done;
1677 * preallocation is required for asynchronous passing of mbufs,
1678 * otherwise we can wind up building up an infinite number of
1679 * mbufs during the asynchronous latency.
1681 if ((so->so_snd.ssb_flags & (SSB_PREALLOC | SSB_STOPSUPP)) == 0) {
1682 error = EINVAL;
1683 goto done;
1686 *sbytes = 0;
1687 xbytes = 0;
1689 * Protect against multiple writers to the socket.
1691 ssb_lock(&so->so_snd, M_WAITOK);
1694 * Loop through the pages in the file, starting with the requested
1695 * offset. Get a file page (do I/O if necessary), map the file page
1696 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1697 * it on the socket.
1699 for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes, xbytes += xfsize) {
1700 vm_pindex_t pindex;
1701 vm_offset_t pgoff;
1702 long space;
1704 pindex = OFF_TO_IDX(off);
1705 retry_lookup:
1707 * Calculate the amount to transfer. Not to exceed a page,
1708 * the EOF, or the passed in nbytes.
1710 xfsize = vp->v_filesize - off;
1711 if (xfsize > PAGE_SIZE)
1712 xfsize = PAGE_SIZE;
1713 pgoff = (vm_offset_t)(off & PAGE_MASK);
1714 if (PAGE_SIZE - pgoff < xfsize)
1715 xfsize = PAGE_SIZE - pgoff;
1716 if (nbytes && xfsize > (nbytes - xbytes))
1717 xfsize = nbytes - xbytes;
1718 if (xfsize <= 0)
1719 break;
1721 * Optimize the non-blocking case by looking at the socket space
1722 * before going to the extra work of constituting the sf_buf.
1724 if (so->so_snd.ssb_flags & SSB_PREALLOC)
1725 space = ssb_space_prealloc(&so->so_snd);
1726 else
1727 space = ssb_space(&so->so_snd);
1729 if ((fp->f_flag & FNONBLOCK) && space <= 0) {
1730 if (so->so_state & SS_CANTSENDMORE)
1731 error = EPIPE;
1732 else
1733 error = EAGAIN;
1734 ssb_unlock(&so->so_snd);
1735 goto done;
1738 * Attempt to look up the page.
1740 * Allocate if not found, wait and loop if busy, then hold the page.
1741 * We hold rather than wire the page because we do not want to prevent
1742 * filesystem truncation operations from occurring on the file. This
1743 * can happen even under normal operation if the file being sent is
1744 * remove()d after the sendfile() call completes, because the socket buffer
1745 * may still be draining. tmpfs will crash if we try to use wire.
1747 vm_object_hold(obj);
1748 pg = vm_page_lookup_busy_try(obj, pindex, TRUE, &error);
1749 if (error) {
1750 vm_page_sleep_busy(pg, TRUE, "sfpbsy");
1751 vm_object_drop(obj);
1752 goto retry_lookup;
1754 if (pg == NULL) {
1755 pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL |
1756 VM_ALLOC_NULL_OK);
1757 if (pg == NULL) {
1758 vm_wait(0);
1759 vm_object_drop(obj);
1760 goto retry_lookup;
1763 vm_page_hold(pg);
1764 vm_object_drop(obj);
1767 * If page is not valid for what we need, initiate I/O
1770 if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1771 struct uio auio;
1772 struct iovec aiov;
1773 int bsize;
1776 * Ensure that our page is still around when the I/O
1777 * completes.
1779 * Ensure that our page is not modified while part of
1780 * a mbuf as this could mess up tcp checksums, DMA,
1781 * etc (XXX NEEDS WORK). The softbusy is supposed to
1782 * help here but it actually doesn't.
1784 * XXX THIS HAS MULTIPLE PROBLEMS. The underlying
1785 * VM pages are not protected by the soft-busy
1786 * unless we vm_page_protect... READ them, and
1787 * they STILL aren't protected against
1788 * modification via the buffer cache (VOP_WRITE).
1790 * Fixing the second issue is particularly
1791 * difficult.
1793 * XXX We also can't soft-busy anyway because it can
1794 * deadlock against the syncer doing a vfs_msync(),
1795 * vfs_msync->vmntvnodesca->vfs_msync_scan2->
1796 * vm_object_page_clean->(scan)-> ... page
1797 * busy-wait.
1799 /*vm_page_io_start(pg);*/
1800 vm_page_wakeup(pg);
1803 * Get the page from backing store.
1805 bsize = vp->v_mount->mnt_stat.f_iosize;
1806 auio.uio_iov = &aiov;
1807 auio.uio_iovcnt = 1;
1808 aiov.iov_base = 0;
1809 aiov.iov_len = MAXBSIZE;
1810 auio.uio_resid = MAXBSIZE;
1811 auio.uio_offset = trunc_page(off);
1812 auio.uio_segflg = UIO_NOCOPY;
1813 auio.uio_rw = UIO_READ;
1814 auio.uio_td = td;
1815 vn_lock(vp, LK_SHARED | LK_RETRY);
1816 error = VOP_READ(vp, &auio,
1817 IO_VMIO | ((MAXBSIZE / bsize) << 16),
1818 td->td_ucred);
1819 vn_unlock(vp);
1820 vm_page_busy_wait(pg, FALSE, "sockpg");
1821 /*vm_page_io_finish(pg);*/
1822 if (error) {
1823 vm_page_wakeup(pg);
1824 vm_page_unhold(pg);
1825 /* vm_page_try_to_free(pg); */
1826 ssb_unlock(&so->so_snd);
1827 goto done;
1833 * Get a sendfile buf. We usually wait as long as necessary,
1834 * but this wait can be interrupted.
1836 if ((sf = sf_buf_alloc(pg)) == NULL) {
1837 vm_page_wakeup(pg);
1838 vm_page_unhold(pg);
1839 /* vm_page_try_to_free(pg); */
1840 ssb_unlock(&so->so_snd);
1841 error = EINTR;
1842 goto done;
1846 * Get an mbuf header and set it up as having external storage.
1848 MGETHDR(m, M_WAITOK, MT_DATA);
1849 if (m == NULL) {
1850 error = ENOBUFS;
1851 vm_page_wakeup(pg);
1852 vm_page_unhold(pg);
1853 /* vm_page_try_to_free(pg); */
1854 sf_buf_free(sf);
1855 ssb_unlock(&so->so_snd);
1856 goto done;
1859 vm_page_wakeup(pg);
1861 m->m_ext.ext_free = sf_buf_mfree;
1862 m->m_ext.ext_ref = sf_buf_ref;
1863 m->m_ext.ext_arg = sf;
1864 m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
1865 m->m_ext.ext_size = PAGE_SIZE;
1866 m->m_data = (char *)sf_buf_kva(sf) + pgoff;
1867 m->m_flags |= M_EXT;
1868 m->m_pkthdr.len = m->m_len = xfsize;
1869 KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1871 if (mheader != NULL) {
1872 hbytes = mheader->m_pkthdr.len;
1873 mheader->m_pkthdr.len += m->m_pkthdr.len;
1874 m_cat(mheader, m);
1875 m = mheader;
1876 mheader = NULL;
1877 } else
1878 hbytes = 0;
1881 * Add the buffer to the socket buffer chain.
1883 crit_enter();
1884 retry_space:
1886 * Make sure that the socket is still able to take more data.
1887 * CANTSENDMORE being true usually means that the connection
1888 * was closed. so_error is true when an error was sensed after
1889 * a previous send.
1890 * The state is checked after the page mapping and buffer
1891 * allocation above since those operations may block and make
1892 * any socket checks stale. From this point forward, nothing
1893 * blocks before the pru_send (or more accurately, any blocking
1894 * results in a loop back to here to re-check).
1896 if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1897 if (so->so_state & SS_CANTSENDMORE) {
1898 error = EPIPE;
1899 } else {
1900 error = so->so_error;
1901 so->so_error = 0;
1903 m_freem(m);
1904 ssb_unlock(&so->so_snd);
1905 crit_exit();
1906 goto done;
1909 * Wait for socket space to become available. We do this just
1910 * after checking the connection state above in order to avoid
1911 * a race condition with ssb_wait().
1913 if (so->so_snd.ssb_flags & SSB_PREALLOC)
1914 space = ssb_space_prealloc(&so->so_snd);
1915 else
1916 space = ssb_space(&so->so_snd);
1918 if (space < m->m_pkthdr.len && space < so->so_snd.ssb_lowat) {
1919 if (fp->f_flag & FNONBLOCK) {
1920 m_freem(m);
1921 ssb_unlock(&so->so_snd);
1922 crit_exit();
1923 error = EAGAIN;
1924 goto done;
1926 error = ssb_wait(&so->so_snd);
1928 * An error from ssb_wait usually indicates that we've
1929 * been interrupted by a signal. If we've sent anything
1930 * then return bytes sent, otherwise return the error.
1932 if (error) {
1933 m_freem(m);
1934 ssb_unlock(&so->so_snd);
1935 crit_exit();
1936 goto done;
1938 goto retry_space;
1941 if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1942 for (mp = m; mp != NULL; mp = mp->m_next)
1943 ssb_preallocstream(&so->so_snd, mp);
1945 if (use_sendfile_async)
1946 error = so_pru_senda(so, 0, m, NULL, NULL, td);
1947 else
1948 error = so_pru_send(so, 0, m, NULL, NULL, td);
1950 crit_exit();
1951 if (error) {
1952 ssb_unlock(&so->so_snd);
1953 goto done;
1956 if (mheader != NULL) {
1957 *sbytes += mheader->m_pkthdr.len;
1959 if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1960 for (mp = mheader; mp != NULL; mp = mp->m_next)
1961 ssb_preallocstream(&so->so_snd, mp);
1963 if (use_sendfile_async)
1964 error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
1965 else
1966 error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1968 mheader = NULL;
1970 ssb_unlock(&so->so_snd);
1972 done:
1973 fdrop(fp);
1974 done0:
1975 if (mheader != NULL)
1976 m_freem(mheader);
1977 return (error);
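/*
 * Illustrative userland sketch, not part of the kernel source: driving
 * sendfile(2) on a non-blocking socket.  When the socket buffer fills,
 * kern_sendfile() returns EAGAIN with the partial count left in *sbytes,
 * so the caller advances its offset and waits for the socket to become
 * writable again (poll(2) here).  nbytes is the exact remaining byte
 * count.  The helper name is hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <poll.h>
#include <errno.h>

int
example_sendfile_nonblock(int filefd, int sock, off_t offset, size_t nbytes)
{
	struct pollfd pfd;
	off_t sbytes;

	while (nbytes > 0) {
		sbytes = 0;
		if (sendfile(filefd, sock, offset, nbytes, NULL,
		    &sbytes, 0) == 0)
			return (0);	/* requested range sent (or EOF hit) */
		if (errno != EAGAIN)
			return (-1);
		offset += sbytes;
		nbytes -= (size_t)sbytes;

		pfd.fd = sock;
		pfd.events = POLLOUT;
		if (poll(&pfd, 1, -1) < 0)
			return (-1);
	}
	return (0);
}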