dragonfly.git / sys / kern / uipc_syscalls.c
blob dae04de16048ec795ad13714deb5f26be2d49646
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
33 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
36 #include "opt_ktrace.h"
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/sysproto.h>
42 #include <sys/malloc.h>
43 #include <sys/filedesc.h>
44 #include <sys/event.h>
45 #include <sys/proc.h>
46 #include <sys/fcntl.h>
47 #include <sys/file.h>
48 #include <sys/filio.h>
49 #include <sys/kern_syscall.h>
50 #include <sys/mbuf.h>
51 #include <sys/protosw.h>
52 #include <sys/sfbuf.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/socketops.h>
56 #include <sys/uio.h>
57 #include <sys/vnode.h>
58 #include <sys/lock.h>
59 #include <sys/mount.h>
60 #ifdef KTRACE
61 #include <sys/ktrace.h>
62 #endif
63 #include <vm/vm.h>
64 #include <vm/vm_object.h>
65 #include <vm/vm_page.h>
66 #include <vm/vm_pageout.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_extern.h>
69 #include <sys/file2.h>
70 #include <sys/signalvar.h>
71 #include <sys/serialize.h>
73 #include <sys/thread2.h>
74 #include <sys/msgport2.h>
75 #include <sys/socketvar2.h>
76 #include <net/netmsg2.h>
77 #include <vm/vm_page2.h>
79 extern int use_soaccept_pred_fast;
80 extern int use_sendfile_async;
81 extern int use_soconnect_async;
84 * System call interface to the socket abstraction.
87 extern struct fileops socketops;
90 * socket_args(int domain, int type, int protocol)
92 int
93 kern_socket(int domain, int type, int protocol, int *res)
95 struct thread *td = curthread;
96 struct filedesc *fdp = td->td_proc->p_fd;
97 struct socket *so;
98 struct file *fp;
99 int fd, error;
100 u_int fflags = 0;
101 int oflags = 0;
103 KKASSERT(td->td_lwp);
105 if (type & SOCK_NONBLOCK) {
106 type &= ~SOCK_NONBLOCK;
107 fflags |= FNONBLOCK;
109 if (type & SOCK_CLOEXEC) {
110 type &= ~SOCK_CLOEXEC;
111 oflags |= O_CLOEXEC;
114 error = falloc(td->td_lwp, &fp, &fd);
115 if (error)
116 return (error);
117 error = socreate(domain, &so, type, protocol, td);
118 if (error) {
119 fsetfd(fdp, NULL, fd);
120 } else {
121 fp->f_type = DTYPE_SOCKET;
122 fp->f_flag = FREAD | FWRITE | fflags;
123 fp->f_ops = &socketops;
124 fp->f_data = so;
125 if (oflags & O_CLOEXEC)
126 fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
127 *res = fd;
128 fsetfd(fdp, fp, fd);
130 fdrop(fp);
131 return (error);
135 * MPALMOSTSAFE
138 sys_socket(struct socket_args *uap)
140 int error;
142 error = kern_socket(uap->domain, uap->type, uap->protocol,
143 &uap->sysmsg_iresult);
145 return (error);
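/*
 * Userland view of the flag handling above (a minimal sketch, assuming the
 * standard <sys/socket.h> declarations): SOCK_NONBLOCK and SOCK_CLOEXEC are
 * stripped from the type and become FNONBLOCK on the file and UF_EXCLOSE on
 * the descriptor.
 *
 *	int s;
 *
 *	s = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
 *	if (s < 0)
 *		err(1, "socket");
 */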
149 kern_bind(int s, struct sockaddr *sa)
151 struct thread *td = curthread;
152 struct proc *p = td->td_proc;
153 struct file *fp;
154 int error;
156 KKASSERT(p);
157 error = holdsock(p->p_fd, s, &fp);
158 if (error)
159 return (error);
160 error = sobind((struct socket *)fp->f_data, sa, td);
161 fdrop(fp);
162 return (error);
166 * bind_args(int s, caddr_t name, int namelen)
168 * MPALMOSTSAFE
171 sys_bind(struct bind_args *uap)
173 struct sockaddr *sa;
174 int error;
176 error = getsockaddr(&sa, uap->name, uap->namelen);
177 if (error)
178 return (error);
179 error = kern_bind(uap->s, sa);
180 kfree(sa, M_SONAME);
182 return (error);
186 kern_listen(int s, int backlog)
188 struct thread *td = curthread;
189 struct proc *p = td->td_proc;
190 struct file *fp;
191 int error;
193 KKASSERT(p);
194 error = holdsock(p->p_fd, s, &fp);
195 if (error)
196 return (error);
197 error = solisten((struct socket *)fp->f_data, backlog, td);
198 fdrop(fp);
199 return(error);
203 * listen_args(int s, int backlog)
205 * MPALMOSTSAFE
208 sys_listen(struct listen_args *uap)
210 int error;
212 error = kern_listen(uap->s, uap->backlog);
213 return (error);
217 * Returns the accepted socket as well.
219 * NOTE! The sockets sitting on so_comp/so_incomp might have 0 refs, the
220 * pool token is absolutely required to avoid a sofree() race,
221 * as well as to avoid tailq handling races.
223 static boolean_t
224 soaccept_predicate(struct netmsg_so_notify *msg)
226 struct socket *head = msg->base.nm_so;
227 struct socket *so;
229 if (head->so_error != 0) {
230 msg->base.lmsg.ms_error = head->so_error;
231 return (TRUE);
233 lwkt_getpooltoken(head);
234 if (!TAILQ_EMPTY(&head->so_comp)) {
235 /* Abuse nm_so field as copy in/copy out parameter. XXX JH */
236 so = TAILQ_FIRST(&head->so_comp);
237 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);
238 TAILQ_REMOVE(&head->so_comp, so, so_list);
239 head->so_qlen--;
240 soclrstate(so, SS_COMP);
243 * Keep a reference before clearing the so_head
244 * to avoid racing socket close in netisr.
246 soreference(so);
247 so->so_head = NULL;
249 lwkt_relpooltoken(head);
251 msg->base.lmsg.ms_error = 0;
252 msg->base.nm_so = so;
253 return (TRUE);
255 lwkt_relpooltoken(head);
256 if (head->so_state & SS_CANTRCVMORE) {
257 msg->base.lmsg.ms_error = ECONNABORTED;
258 return (TRUE);
260 if (msg->nm_fflags & FNONBLOCK) {
261 msg->base.lmsg.ms_error = EWOULDBLOCK;
262 return (TRUE);
265 return (FALSE);
269 * The second argument to kern_accept() is a handle to a struct sockaddr.
270 * This allows kern_accept() to return a pointer to an allocated struct
271 * sockaddr which must be freed later with kfree(). The caller must
272 * initialize *name to NULL.
275 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res,
276 int sockflags)
278 struct thread *td = curthread;
279 struct filedesc *fdp = td->td_proc->p_fd;
280 struct file *lfp = NULL;
281 struct file *nfp = NULL;
282 struct sockaddr *sa;
283 struct socket *head, *so;
284 struct netmsg_so_notify msg;
285 int fd;
286 u_int fflag; /* type must match fp->f_flag */
287 int error, tmp;
289 *res = -1;
290 if (name && namelen && *namelen < 0)
291 return (EINVAL);
293 error = holdsock(td->td_proc->p_fd, s, &lfp);
294 if (error)
295 return (error);
297 error = falloc(td->td_lwp, &nfp, &fd);
298 if (error) { /* Probably ran out of file descriptors. */
299 fdrop(lfp);
300 return (error);
302 head = (struct socket *)lfp->f_data;
303 if ((head->so_options & SO_ACCEPTCONN) == 0) {
304 error = EINVAL;
305 goto done;
308 if (fflags & O_FBLOCKING)
309 fflags |= lfp->f_flag & ~FNONBLOCK;
310 else if (fflags & O_FNONBLOCKING)
311 fflags |= lfp->f_flag | FNONBLOCK;
312 else
313 fflags = lfp->f_flag;
315 if (use_soaccept_pred_fast) {
316 boolean_t pred;
318 /* Initialize necessary parts for soaccept_predicate() */
319 netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
320 msg.nm_fflags = fflags;
322 lwkt_getpooltoken(head);
323 pred = soaccept_predicate(&msg);
324 lwkt_relpooltoken(head);
326 if (pred) {
327 error = msg.base.lmsg.ms_error;
328 if (error)
329 goto done;
330 else
331 goto accepted;
335 /* optimize for uniprocessor case later XXX JH */
336 netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
337 0, netmsg_so_notify, netmsg_so_notify_doabort);
338 msg.nm_predicate = soaccept_predicate;
339 msg.nm_fflags = fflags;
340 msg.nm_etype = NM_REVENT;
341 error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
342 if (error)
343 goto done;
345 accepted:
347 * At this point we have the connection that's ready to be accepted.
349 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
350 * to eat the ref and turn it into a descriptor.
352 so = msg.base.nm_so;
354 fflag = lfp->f_flag;
356 /* connection has been removed from the listen queue */
357 KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);
359 if (sockflags & SOCK_KERN_NOINHERIT) {
360 fflag &= ~(FASYNC | FNONBLOCK);
361 if (sockflags & SOCK_NONBLOCK)
362 fflag |= FNONBLOCK;
363 } else {
364 if (head->so_sigio != NULL)
365 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
368 nfp->f_type = DTYPE_SOCKET;
369 nfp->f_flag = fflag;
370 nfp->f_ops = &socketops;
371 nfp->f_data = so;
372 /* Sync socket async state with file flags */
373 tmp = fflag & FASYNC;
374 fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);
376 sa = NULL;
377 if (so->so_faddr != NULL) {
378 sa = so->so_faddr;
379 so->so_faddr = NULL;
381 soaccept_generic(so);
382 error = 0;
383 } else {
384 error = soaccept(so, &sa);
388 * Set the returned name and namelen as applicable. Set the returned
389 * namelen to 0 for older code which might ignore the return value
390 * from accept.
392 if (error == 0) {
393 if (sa && name && namelen) {
394 if (*namelen > sa->sa_len)
395 *namelen = sa->sa_len;
396 *name = sa;
397 } else {
398 if (sa)
399 kfree(sa, M_SONAME);
403 done:
405 * If an error occurred clear the reserved descriptor, else associate
406 * nfp with it.
408 * Note that *res is normally ignored if an error is returned but
409 * a syscall message will still have access to the result code.
411 if (error) {
412 fsetfd(fdp, NULL, fd);
413 } else {
414 if (sockflags & SOCK_CLOEXEC)
415 fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
416 *res = fd;
417 fsetfd(fdp, nfp, fd);
419 fdrop(nfp);
420 fdrop(lfp);
421 return (error);
425 * accept(int s, caddr_t name, int *anamelen)
427 * MPALMOSTSAFE
430 sys_accept(struct accept_args *uap)
432 struct sockaddr *sa = NULL;
433 int sa_len;
434 int error;
436 if (uap->name) {
437 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
438 if (error)
439 return (error);
441 error = kern_accept(uap->s, 0, &sa, &sa_len,
442 &uap->sysmsg_iresult, 0);
444 if (error == 0)
445 error = copyout(sa, uap->name, sa_len);
446 if (error == 0) {
447 error = copyout(&sa_len, uap->anamelen,
448 sizeof(*uap->anamelen));
450 if (sa)
451 kfree(sa, M_SONAME);
452 } else {
453 error = kern_accept(uap->s, 0, NULL, 0,
454 &uap->sysmsg_iresult, 0);
456 return (error);
460 * extaccept(int s, int fflags, caddr_t name, int *anamelen)
462 * MPALMOSTSAFE
465 sys_extaccept(struct extaccept_args *uap)
467 struct sockaddr *sa = NULL;
468 int sa_len;
469 int error;
470 int fflags = uap->flags & O_FMASK;
472 if (uap->name) {
473 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
474 if (error)
475 return (error);
477 error = kern_accept(uap->s, fflags, &sa, &sa_len,
478 &uap->sysmsg_iresult, 0);
480 if (error == 0)
481 error = copyout(sa, uap->name, sa_len);
482 if (error == 0) {
483 error = copyout(&sa_len, uap->anamelen,
484 sizeof(*uap->anamelen));
486 if (sa)
487 kfree(sa, M_SONAME);
488 } else {
489 error = kern_accept(uap->s, fflags, NULL, 0,
490 &uap->sysmsg_iresult, 0);
492 return (error);
496 * accept4(int s, caddr_t name, int *anamelen, int flags)
498 * MPALMOSTSAFE
501 sys_accept4(struct accept4_args *uap)
503 struct sockaddr *sa = NULL;
504 int sa_len;
505 int error;
506 int sockflags;
508 if (uap->flags & ~(SOCK_NONBLOCK | SOCK_CLOEXEC))
509 return (EINVAL);
510 sockflags = uap->flags | SOCK_KERN_NOINHERIT;
512 if (uap->name) {
513 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
514 if (error)
515 return (error);
517 error = kern_accept(uap->s, 0, &sa, &sa_len,
518 &uap->sysmsg_iresult, sockflags);
520 if (error == 0)
521 error = copyout(sa, uap->name, sa_len);
522 if (error == 0) {
523 error = copyout(&sa_len, uap->anamelen,
524 sizeof(*uap->anamelen));
526 if (sa)
527 kfree(sa, M_SONAME);
528 } else {
529 error = kern_accept(uap->s, 0, NULL, 0,
530 &uap->sysmsg_iresult, sockflags);
532 return (error);
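/*
 * Minimal userland sketch of accept4(2) as validated above: any flag other
 * than SOCK_NONBLOCK or SOCK_CLOEXEC fails with EINVAL, and the accepted
 * descriptor does not inherit the listener's FASYNC/FNONBLOCK state
 * (SOCK_KERN_NOINHERIT). The listening descriptor `ls' is a placeholder.
 *
 *	struct sockaddr_storage ss;
 *	socklen_t slen = sizeof(ss);
 *	int fd;
 *
 *	fd = accept4(ls, (struct sockaddr *)&ss, &slen,
 *		     SOCK_NONBLOCK | SOCK_CLOEXEC);
 *	if (fd < 0)
 *		err(1, "accept4");
 */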
536 * Returns TRUE if predicate satisfied.
538 static boolean_t
539 soconnected_predicate(struct netmsg_so_notify *msg)
541 struct socket *so = msg->base.nm_so;
543 /* check predicate */
544 if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
545 msg->base.lmsg.ms_error = so->so_error;
546 return (TRUE);
549 return (FALSE);
553 kern_connect(int s, int fflags, struct sockaddr *sa)
555 struct thread *td = curthread;
556 struct proc *p = td->td_proc;
557 struct file *fp;
558 struct socket *so;
559 int error, interrupted = 0;
561 error = holdsock(p->p_fd, s, &fp);
562 if (error)
563 return (error);
564 so = (struct socket *)fp->f_data;
566 if (fflags & O_FBLOCKING)
567 /* fflags &= ~FNONBLOCK; */;
568 else if (fflags & O_FNONBLOCKING)
569 fflags |= FNONBLOCK;
570 else
571 fflags = fp->f_flag;
573 if (so->so_state & SS_ISCONNECTING) {
574 error = EALREADY;
575 goto done;
577 error = soconnect(so, sa, td, use_soconnect_async ? FALSE : TRUE);
578 if (error)
579 goto bad;
580 if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
581 error = EINPROGRESS;
582 goto done;
584 if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
585 struct netmsg_so_notify msg;
587 netmsg_init_abortable(&msg.base, so,
588 &curthread->td_msgport,
590 netmsg_so_notify,
591 netmsg_so_notify_doabort);
592 msg.nm_predicate = soconnected_predicate;
593 msg.nm_etype = NM_REVENT;
594 error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
595 if (error == EINTR || error == ERESTART)
596 interrupted = 1;
598 if (error == 0) {
599 error = so->so_error;
600 so->so_error = 0;
602 bad:
603 if (!interrupted)
604 soclrstate(so, SS_ISCONNECTING);
605 if (error == ERESTART)
606 error = EINTR;
607 done:
608 fdrop(fp);
609 return (error);
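/*
 * Userland counterpart of the non-blocking path above (illustrative sketch,
 * assuming <poll.h> and <sys/socket.h>): connect(2) on a non-blocking socket
 * returns EINPROGRESS, completion is detected by polling for writability,
 * and the final status is read back with SO_ERROR.
 *
 *	if (connect(s, sa, salen) < 0 && errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = s, .events = POLLOUT };
 *		int soerr;
 *		socklen_t len = sizeof(soerr);
 *
 *		poll(&pfd, 1, -1);
 *		getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len);
 *		if (soerr != 0)
 *			errno = soerr;		/* connection failed */
 *	}
 */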
613 * connect_args(int s, caddr_t name, int namelen)
615 * MPALMOSTSAFE
618 sys_connect(struct connect_args *uap)
620 struct sockaddr *sa;
621 int error;
623 error = getsockaddr(&sa, uap->name, uap->namelen);
624 if (error)
625 return (error);
626 error = kern_connect(uap->s, 0, sa);
627 kfree(sa, M_SONAME);
629 return (error);
633 * connect_args(int s, int fflags, caddr_t name, int namelen)
635 * MPALMOSTSAFE
638 sys_extconnect(struct extconnect_args *uap)
640 struct sockaddr *sa;
641 int error;
642 int fflags = uap->flags & O_FMASK;
644 error = getsockaddr(&sa, uap->name, uap->namelen);
645 if (error)
646 return (error);
647 error = kern_connect(uap->s, fflags, sa);
648 kfree(sa, M_SONAME);
650 return (error);
654 kern_socketpair(int domain, int type, int protocol, int *sv)
656 struct thread *td = curthread;
657 struct filedesc *fdp;
658 struct file *fp1, *fp2;
659 struct socket *so1, *so2;
660 int fd1, fd2, error;
661 u_int fflags = 0;
662 int oflags = 0;
664 if (type & SOCK_NONBLOCK) {
665 type &= ~SOCK_NONBLOCK;
666 fflags |= FNONBLOCK;
668 if (type & SOCK_CLOEXEC) {
669 type &= ~SOCK_CLOEXEC;
670 oflags |= O_CLOEXEC;
673 fdp = td->td_proc->p_fd;
674 error = socreate(domain, &so1, type, protocol, td);
675 if (error)
676 return (error);
677 error = socreate(domain, &so2, type, protocol, td);
678 if (error)
679 goto free1;
680 error = falloc(td->td_lwp, &fp1, &fd1);
681 if (error)
682 goto free2;
683 sv[0] = fd1;
684 fp1->f_data = so1;
685 error = falloc(td->td_lwp, &fp2, &fd2);
686 if (error)
687 goto free3;
688 fp2->f_data = so2;
689 sv[1] = fd2;
690 error = soconnect2(so1, so2);
691 if (error)
692 goto free4;
693 if (type == SOCK_DGRAM) {
695 * Datagram socket connection is asymmetric.
697 error = soconnect2(so2, so1);
698 if (error)
699 goto free4;
701 fp1->f_type = fp2->f_type = DTYPE_SOCKET;
702 fp1->f_flag = fp2->f_flag = FREAD|FWRITE|fflags;
703 fp1->f_ops = fp2->f_ops = &socketops;
704 if (oflags & O_CLOEXEC) {
705 fdp->fd_files[fd1].fileflags |= UF_EXCLOSE;
706 fdp->fd_files[fd2].fileflags |= UF_EXCLOSE;
708 fsetfd(fdp, fp1, fd1);
709 fsetfd(fdp, fp2, fd2);
710 fdrop(fp1);
711 fdrop(fp2);
712 return (error);
713 free4:
714 fsetfd(fdp, NULL, fd2);
715 fdrop(fp2);
716 free3:
717 fsetfd(fdp, NULL, fd1);
718 fdrop(fp1);
719 free2:
720 (void)soclose(so2, 0);
721 free1:
722 (void)soclose(so1, 0);
723 return (error);
727 * socketpair(int domain, int type, int protocol, int *rsv)
730 sys_socketpair(struct socketpair_args *uap)
732 int error, sockv[2];
734 error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
736 if (error == 0) {
737 error = copyout(sockv, uap->rsv, sizeof(sockv));
739 if (error != 0) {
740 kern_close(sockv[0]);
741 kern_close(sockv[1]);
745 return (error);
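/*
 * Userland sketch of socketpair(2): the two descriptors come back already
 * connected, and for SOCK_DGRAM the code above connects both directions, so
 * either end may send to the other.
 *
 *	int sv[2];
 *	char c;
 *
 *	if (socketpair(AF_LOCAL, SOCK_DGRAM, 0, sv) < 0)
 *		err(1, "socketpair");
 *	write(sv[0], "x", 1);
 *	read(sv[1], &c, 1);
 */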
749 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
750 struct mbuf *control, int flags, size_t *res)
752 struct thread *td = curthread;
753 struct lwp *lp = td->td_lwp;
754 struct proc *p = td->td_proc;
755 struct file *fp;
756 size_t len;
757 int error;
758 struct socket *so;
759 #ifdef KTRACE
760 struct iovec *ktriov = NULL;
761 struct uio ktruio;
762 #endif
764 error = holdsock(p->p_fd, s, &fp);
765 if (error)
766 return (error);
767 #ifdef KTRACE
768 if (KTRPOINT(td, KTR_GENIO)) {
769 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
771 ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
772 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
773 ktruio = *auio;
775 #endif
776 len = auio->uio_resid;
777 so = (struct socket *)fp->f_data;
778 if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
779 if (fp->f_flag & FNONBLOCK)
780 flags |= MSG_FNONBLOCKING;
782 error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
783 if (error) {
784 if (auio->uio_resid != len && (error == ERESTART ||
785 error == EINTR || error == EWOULDBLOCK))
786 error = 0;
787 if (error == EPIPE && !(flags & MSG_NOSIGNAL) &&
788 !(so->so_options & SO_NOSIGPIPE))
789 lwpsignal(p, lp, SIGPIPE);
791 #ifdef KTRACE
792 if (ktriov != NULL) {
793 if (error == 0) {
794 ktruio.uio_iov = ktriov;
795 ktruio.uio_resid = len - auio->uio_resid;
796 ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
798 kfree(ktriov, M_TEMP);
800 #endif
801 if (error == 0)
802 *res = len - auio->uio_resid;
803 fdrop(fp);
804 return (error);
808 * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
810 * MPALMOSTSAFE
813 sys_sendto(struct sendto_args *uap)
815 struct thread *td = curthread;
816 struct uio auio;
817 struct iovec aiov;
818 struct sockaddr *sa = NULL;
819 int error;
821 if (uap->to) {
822 error = getsockaddr(&sa, uap->to, uap->tolen);
823 if (error)
824 return (error);
826 aiov.iov_base = uap->buf;
827 aiov.iov_len = uap->len;
828 auio.uio_iov = &aiov;
829 auio.uio_iovcnt = 1;
830 auio.uio_offset = 0;
831 auio.uio_resid = uap->len;
832 auio.uio_segflg = UIO_USERSPACE;
833 auio.uio_rw = UIO_WRITE;
834 auio.uio_td = td;
836 error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
837 &uap->sysmsg_szresult);
839 if (sa)
840 kfree(sa, M_SONAME);
841 return (error);
845 * sendmsg_args(int s, caddr_t msg, int flags)
847 * MPALMOSTSAFE
850 sys_sendmsg(struct sendmsg_args *uap)
852 struct thread *td = curthread;
853 struct msghdr msg;
854 struct uio auio;
855 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
856 struct sockaddr *sa = NULL;
857 struct mbuf *control = NULL;
858 int error;
860 error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
861 if (error)
862 return (error);
865 * Conditionally copyin msg.msg_name.
867 if (msg.msg_name) {
868 error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
869 if (error)
870 return (error);
874 * Populate auio.
876 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
877 &auio.uio_resid);
878 if (error)
879 goto cleanup2;
880 auio.uio_iov = iov;
881 auio.uio_iovcnt = msg.msg_iovlen;
882 auio.uio_offset = 0;
883 auio.uio_segflg = UIO_USERSPACE;
884 auio.uio_rw = UIO_WRITE;
885 auio.uio_td = td;
888 * Conditionally copyin msg.msg_control.
890 if (msg.msg_control) {
891 if (msg.msg_controllen < sizeof(struct cmsghdr) ||
892 msg.msg_controllen > MLEN) {
893 error = EINVAL;
894 goto cleanup;
896 control = m_get(M_WAITOK, MT_CONTROL);
897 if (control == NULL) {
898 error = ENOBUFS;
899 goto cleanup;
901 control->m_len = msg.msg_controllen;
902 error = copyin(msg.msg_control, mtod(control, caddr_t),
903 msg.msg_controllen);
904 if (error) {
905 m_free(control);
906 goto cleanup;
910 error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
911 &uap->sysmsg_szresult);
913 cleanup:
914 iovec_free(&iov, aiov);
915 cleanup2:
916 if (sa)
917 kfree(sa, M_SONAME);
918 return (error);
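/*
 * Sketch of the control-message layout copied in above: msg_controllen must
 * be at least sizeof(struct cmsghdr) and no larger than MLEN. A typical
 * userland construction (illustrative; `fd_to_pass' is a placeholder) for
 * descriptor passing over an AF_LOCAL socket:
 *
 *	union {
 *		struct cmsghdr hdr;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} cm;
 *	char c = 0;
 *	struct iovec iov = { &c, 1 };
 *	struct msghdr msg;
 *	struct cmsghdr *cmp;
 *
 *	bzero(&msg, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cm.buf;
 *	msg.msg_controllen = CMSG_SPACE(sizeof(int));
 *	cmp = CMSG_FIRSTHDR(&msg);
 *	cmp->cmsg_len = CMSG_LEN(sizeof(int));
 *	cmp->cmsg_level = SOL_SOCKET;
 *	cmp->cmsg_type = SCM_RIGHTS;
 *	memcpy(CMSG_DATA(cmp), &fd_to_pass, sizeof(int));
 *	sendmsg(s, &msg, 0);
 */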
922 * kern_recvmsg() takes a handle to sa and control. If the handle is non-
923 * null, it returns a dynamically allocated struct sockaddr and an mbuf.
924 * Don't forget to kfree() and m_freem() these if they are returned.
927 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
928 struct mbuf **control, int *flags, size_t *res)
930 struct thread *td = curthread;
931 struct proc *p = td->td_proc;
932 struct file *fp;
933 size_t len;
934 int error;
935 int lflags;
936 struct socket *so;
937 #ifdef KTRACE
938 struct iovec *ktriov = NULL;
939 struct uio ktruio;
940 #endif
942 error = holdsock(p->p_fd, s, &fp);
943 if (error)
944 return (error);
945 #ifdef KTRACE
946 if (KTRPOINT(td, KTR_GENIO)) {
947 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
949 ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
950 bcopy(auio->uio_iov, ktriov, iovlen);
951 ktruio = *auio;
953 #endif
954 len = auio->uio_resid;
955 so = (struct socket *)fp->f_data;
957 if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
958 if (fp->f_flag & FNONBLOCK) {
959 if (flags) {
960 *flags |= MSG_FNONBLOCKING;
961 } else {
962 lflags = MSG_FNONBLOCKING;
963 flags = &lflags;
968 error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
969 if (error) {
970 if (auio->uio_resid != len && (error == ERESTART ||
971 error == EINTR || error == EWOULDBLOCK))
972 error = 0;
974 #ifdef KTRACE
975 if (ktriov != NULL) {
976 if (error == 0) {
977 ktruio.uio_iov = ktriov;
978 ktruio.uio_resid = len - auio->uio_resid;
979 ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
981 kfree(ktriov, M_TEMP);
983 #endif
984 if (error == 0)
985 *res = len - auio->uio_resid;
986 fdrop(fp);
987 return (error);
991 * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
992 * caddr_t from, int *fromlenaddr)
994 * MPALMOSTSAFE
997 sys_recvfrom(struct recvfrom_args *uap)
999 struct thread *td = curthread;
1000 struct uio auio;
1001 struct iovec aiov;
1002 struct sockaddr *sa = NULL;
1003 int error, fromlen;
1005 if (uap->from && uap->fromlenaddr) {
1006 error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
1007 if (error)
1008 return (error);
1009 if (fromlen < 0)
1010 return (EINVAL);
1011 } else {
1012 fromlen = 0;
1014 aiov.iov_base = uap->buf;
1015 aiov.iov_len = uap->len;
1016 auio.uio_iov = &aiov;
1017 auio.uio_iovcnt = 1;
1018 auio.uio_offset = 0;
1019 auio.uio_resid = uap->len;
1020 auio.uio_segflg = UIO_USERSPACE;
1021 auio.uio_rw = UIO_READ;
1022 auio.uio_td = td;
1024 error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
1025 &uap->flags, &uap->sysmsg_szresult);
1027 if (error == 0 && uap->from) {
1028 /* note: sa may still be NULL */
1029 if (sa) {
1030 fromlen = MIN(fromlen, sa->sa_len);
1031 error = copyout(sa, uap->from, fromlen);
1032 } else {
1033 fromlen = 0;
1035 if (error == 0) {
1036 error = copyout(&fromlen, uap->fromlenaddr,
1037 sizeof(fromlen));
1040 if (sa)
1041 kfree(sa, M_SONAME);
1043 return (error);
1047 * recvmsg_args(int s, struct msghdr *msg, int flags)
1049 * MPALMOSTSAFE
1052 sys_recvmsg(struct recvmsg_args *uap)
1054 struct thread *td = curthread;
1055 struct msghdr msg;
1056 struct uio auio;
1057 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1058 struct mbuf *m, *control = NULL;
1059 struct sockaddr *sa = NULL;
1060 caddr_t ctlbuf;
1061 socklen_t *ufromlenp, *ucontrollenp;
1062 int error, fromlen, controllen, len, flags, *uflagsp;
1065 * This copyin handles everything except the iovec.
1067 error = copyin(uap->msg, &msg, sizeof(msg));
1068 if (error)
1069 return (error);
1071 if (msg.msg_name && msg.msg_namelen < 0)
1072 return (EINVAL);
1073 if (msg.msg_control && msg.msg_controllen < 0)
1074 return (EINVAL);
1076 ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1077 msg_namelen));
1078 ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1079 msg_controllen));
1080 uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
1081 msg_flags));
1084 * Populate auio.
1086 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
1087 &auio.uio_resid);
1088 if (error)
1089 return (error);
1090 auio.uio_iov = iov;
1091 auio.uio_iovcnt = msg.msg_iovlen;
1092 auio.uio_offset = 0;
1093 auio.uio_segflg = UIO_USERSPACE;
1094 auio.uio_rw = UIO_READ;
1095 auio.uio_td = td;
1097 flags = uap->flags;
1099 error = kern_recvmsg(uap->s,
1100 (msg.msg_name ? &sa : NULL), &auio,
1101 (msg.msg_control ? &control : NULL), &flags,
1102 &uap->sysmsg_szresult);
1105 * Conditionally copyout the name and populate the namelen field.
1107 if (error == 0 && msg.msg_name) {
1108 /* note: sa may still be NULL */
1109 if (sa != NULL) {
1110 fromlen = MIN(msg.msg_namelen, sa->sa_len);
1111 error = copyout(sa, msg.msg_name, fromlen);
1112 } else {
1113 fromlen = 0;
1115 if (error == 0)
1116 error = copyout(&fromlen, ufromlenp,
1117 sizeof(*ufromlenp));
1121 * Copyout msg.msg_control and msg.msg_controllen.
1123 if (error == 0 && msg.msg_control) {
1124 len = msg.msg_controllen;
1125 m = control;
1126 ctlbuf = (caddr_t)msg.msg_control;
1128 while(m && len > 0) {
1129 unsigned int tocopy;
1131 if (len >= m->m_len) {
1132 tocopy = m->m_len;
1133 } else {
1134 msg.msg_flags |= MSG_CTRUNC;
1135 tocopy = len;
1138 error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1139 if (error)
1140 goto cleanup;
1142 ctlbuf += tocopy;
1143 len -= tocopy;
1144 m = m->m_next;
1146 controllen = ctlbuf - (caddr_t)msg.msg_control;
1147 error = copyout(&controllen, ucontrollenp,
1148 sizeof(*ucontrollenp));
1151 if (error == 0)
1152 error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1154 cleanup:
1155 if (sa)
1156 kfree(sa, M_SONAME);
1157 iovec_free(&iov, aiov);
1158 if (control)
1159 m_freem(control);
1160 return (error);
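/*
 * Matching userland receive side (sketch): the copyout loop above truncates
 * control data to the supplied msg_controllen and sets MSG_CTRUNC, so
 * callers size the buffer with CMSG_SPACE() and walk it with the CMSG
 * macros. `received_fd' is a placeholder.
 *
 *	if (recvmsg(s, &msg, 0) < 0)
 *		err(1, "recvmsg");
 *	if (msg.msg_flags & MSG_CTRUNC)
 *		warnx("control data truncated");
 *	for (cmp = CMSG_FIRSTHDR(&msg); cmp != NULL;
 *	     cmp = CMSG_NXTHDR(&msg, cmp)) {
 *		if (cmp->cmsg_level == SOL_SOCKET &&
 *		    cmp->cmsg_type == SCM_RIGHTS)
 *			memcpy(&received_fd, CMSG_DATA(cmp), sizeof(int));
 *	}
 */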
1164 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1165 * in kernel pointer instead of a userland pointer. This allows us
1166 * to manipulate socket options in the emulation code.
1169 kern_setsockopt(int s, struct sockopt *sopt)
1171 struct thread *td = curthread;
1172 struct proc *p = td->td_proc;
1173 struct file *fp;
1174 int error;
1176 if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1177 return (EFAULT);
1178 if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1179 return (EINVAL);
1180 if (sopt->sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1181 return (EINVAL);
1183 error = holdsock(p->p_fd, s, &fp);
1184 if (error)
1185 return (error);
1187 error = sosetopt((struct socket *)fp->f_data, sopt);
1188 fdrop(fp);
1189 return (error);
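/*
 * Sketch of the in-kernel use described above: with sopt_td == NULL the
 * option value in sopt_val is treated as a kernel pointer, so emulation
 * code can set an option without a userland copyin. Field usage mirrors
 * sys_setsockopt() below; this is illustrative only, not a call site from
 * this file.
 *
 *	struct sockopt sopt;
 *	int on = 1;
 *
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_REUSEADDR;
 *	sopt.sopt_val = &on;
 *	sopt.sopt_valsize = sizeof(on);
 *	sopt.sopt_td = NULL;		/* sopt_val is a kernel pointer */
 *	error = kern_setsockopt(s, &sopt);
 */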
1193 * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1195 * MPALMOSTSAFE
1198 sys_setsockopt(struct setsockopt_args *uap)
1200 struct thread *td = curthread;
1201 struct sockopt sopt;
1202 int error;
1204 sopt.sopt_level = uap->level;
1205 sopt.sopt_name = uap->name;
1206 sopt.sopt_valsize = uap->valsize;
1207 sopt.sopt_td = td;
1208 sopt.sopt_val = NULL;
1210 if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1211 return (EINVAL);
1212 if (uap->val) {
1213 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1214 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1215 if (error)
1216 goto out;
1219 error = kern_setsockopt(uap->s, &sopt);
1220 out:
1221 if (uap->val)
1222 kfree(sopt.sopt_val, M_TEMP);
1223 return(error);
1227 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1228 * in kernel pointer instead of a userland pointer. This allows us
1229 * to manipulate socket options in the emulation code.
1232 kern_getsockopt(int s, struct sockopt *sopt)
1234 struct thread *td = curthread;
1235 struct proc *p = td->td_proc;
1236 struct file *fp;
1237 int error;
1239 if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1240 return (EFAULT);
1241 if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1242 return (EINVAL);
1243 if (sopt->sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1244 return (EINVAL);
1246 error = holdsock(p->p_fd, s, &fp);
1247 if (error)
1248 return (error);
1250 error = sogetopt((struct socket *)fp->f_data, sopt);
1251 fdrop(fp);
1252 return (error);
1256 * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1258 * MPALMOSTSAFE
1261 sys_getsockopt(struct getsockopt_args *uap)
1263 struct thread *td = curthread;
1264 struct sockopt sopt;
1265 int error, valsize;
1267 if (uap->val) {
1268 error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1269 if (error)
1270 return (error);
1271 } else {
1272 valsize = 0;
1275 sopt.sopt_level = uap->level;
1276 sopt.sopt_name = uap->name;
1277 sopt.sopt_valsize = valsize;
1278 sopt.sopt_td = td;
1279 sopt.sopt_val = NULL;
1281 if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1282 return (EINVAL);
1283 if (uap->val) {
1284 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1285 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1286 if (error)
1287 goto out;
1290 error = kern_getsockopt(uap->s, &sopt);
1291 if (error)
1292 goto out;
1293 valsize = sopt.sopt_valsize;
1294 error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1295 if (error)
1296 goto out;
1297 if (uap->val)
1298 error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1299 out:
1300 if (uap->val)
1301 kfree(sopt.sopt_val, M_TEMP);
1302 return (error);
1306 * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1307 * This allows kern_getsockname() to return a pointer to an allocated struct
1308 * sockaddr which must be freed later with kfree(). The caller must
1309 * initialize *name to NULL.
1312 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1314 struct thread *td = curthread;
1315 struct proc *p = td->td_proc;
1316 struct file *fp;
1317 struct socket *so;
1318 struct sockaddr *sa = NULL;
1319 int error;
1321 error = holdsock(p->p_fd, s, &fp);
1322 if (error)
1323 return (error);
1324 if (*namelen < 0) {
1325 fdrop(fp);
1326 return (EINVAL);
1328 so = (struct socket *)fp->f_data;
1329 error = so_pru_sockaddr(so, &sa);
1330 if (error == 0) {
1331 if (sa == NULL) {
1332 *namelen = 0;
1333 } else {
1334 *namelen = MIN(*namelen, sa->sa_len);
1335 *name = sa;
1339 fdrop(fp);
1340 return (error);
1344 * getsockname_args(int fdes, caddr_t asa, int *alen)
1346 * Get socket name.
1348 * MPALMOSTSAFE
1351 sys_getsockname(struct getsockname_args *uap)
1353 struct sockaddr *sa = NULL;
1354 int error, sa_len;
1356 error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1357 if (error)
1358 return (error);
1360 error = kern_getsockname(uap->fdes, &sa, &sa_len);
1362 if (error == 0)
1363 error = copyout(sa, uap->asa, sa_len);
1364 if (error == 0)
1365 error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1366 if (sa)
1367 kfree(sa, M_SONAME);
1368 return (error);
1372 * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1373 * This allows kern_getpeername() to return a pointer to an allocated struct
1374 * sockaddr which must be freed later with kfree(). The caller must
1375 * initialize *name to NULL.
1378 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1380 struct thread *td = curthread;
1381 struct proc *p = td->td_proc;
1382 struct file *fp;
1383 struct socket *so;
1384 struct sockaddr *sa = NULL;
1385 int error;
1387 error = holdsock(p->p_fd, s, &fp);
1388 if (error)
1389 return (error);
1390 if (*namelen < 0) {
1391 fdrop(fp);
1392 return (EINVAL);
1394 so = (struct socket *)fp->f_data;
1395 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1396 fdrop(fp);
1397 return (ENOTCONN);
1399 error = so_pru_peeraddr(so, &sa);
1400 if (error == 0) {
1401 if (sa == NULL) {
1402 *namelen = 0;
1403 } else {
1404 *namelen = MIN(*namelen, sa->sa_len);
1405 *name = sa;
1409 fdrop(fp);
1410 return (error);
1414 * getpeername_args(int fdes, caddr_t asa, int *alen)
1416 * Get name of peer for connected socket.
1418 * MPALMOSTSAFE
1421 sys_getpeername(struct getpeername_args *uap)
1423 struct sockaddr *sa = NULL;
1424 int error, sa_len;
1426 error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1427 if (error)
1428 return (error);
1430 error = kern_getpeername(uap->fdes, &sa, &sa_len);
1432 if (error == 0)
1433 error = copyout(sa, uap->asa, sa_len);
1434 if (error == 0)
1435 error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1436 if (sa)
1437 kfree(sa, M_SONAME);
1438 return (error);
1442 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1444 struct sockaddr *sa;
1445 int error;
1447 *namp = NULL;
1448 if (len > SOCK_MAXADDRLEN)
1449 return ENAMETOOLONG;
1450 if (len < offsetof(struct sockaddr, sa_data[0]))
1451 return EDOM;
1452 sa = kmalloc(len, M_SONAME, M_WAITOK);
1453 error = copyin(uaddr, sa, len);
1454 if (error) {
1455 kfree(sa, M_SONAME);
1456 } else {
1457 #if BYTE_ORDER != BIG_ENDIAN
1459 * The bind(), connect(), and sendto() syscalls were not
1460 * versioned for COMPAT_43. Thus, this check must stay.
1462 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1463 sa->sa_family = sa->sa_len;
1464 #endif
1465 sa->sa_len = len;
1466 *namp = sa;
1468 return error;
1472 * Detach a mapped page and release resources back to the system.
1473 * We must release our wiring and if the object is ripped out
1474 * from under the vm_page we become responsible for freeing the
1475 * page.
1477 * MPSAFE
1479 static void
1480 sf_buf_mfree(void *arg)
1482 struct sf_buf *sf = arg;
1483 vm_page_t m;
1485 m = sf_buf_page(sf);
1486 if (sf_buf_free(sf)) {
1487 /* sf invalid now */
1489 vm_page_busy_wait(m, FALSE, "sockpgf");
1490 vm_page_wakeup(m);
1492 vm_page_unhold(m);
1493 #if 0
1494 if (m->object == NULL &&
1495 m->wire_count == 0 &&
1496 (m->flags & PG_NEED_COMMIT) == 0) {
1497 vm_page_free(m);
1498 } else {
1499 vm_page_wakeup(m);
1501 #endif
1506 * sendfile(2).
1507 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1508 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1510 * Send a file specified by 'fd' and starting at 'offset' to a socket
1511 * specified by 's'. Send only 'nbytes' of the file or until EOF if
1512 * nbytes == 0. Optionally add a header and/or trailer to the socket
1513 * output. If specified, write the total number of bytes sent into *sbytes.
1515 * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1516 * the headers to count against the remaining bytes to be sent from
1517 * the file descriptor. We may wish to implement a compatibility syscall
1518 * in the future.
1520 * MPALMOSTSAFE
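/*
 * Minimal userland sketch of the interface described above: send an entire
 * regular file (nbytes == 0 means until EOF) over a connected stream
 * socket; *sbytes reports the total sent, including any headers/trailers.
 * `filefd' and `sockfd' are placeholders.
 *
 *	off_t sbytes = 0;
 *
 *	if (sendfile(filefd, sockfd, 0, 0, NULL, &sbytes, 0) < 0)
 *		err(1, "sendfile");
 */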
1523 sys_sendfile(struct sendfile_args *uap)
1525 struct thread *td = curthread;
1526 struct proc *p = td->td_proc;
1527 struct file *fp;
1528 struct vnode *vp = NULL;
1529 struct sf_hdtr hdtr;
1530 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1531 struct uio auio;
1532 struct mbuf *mheader = NULL;
1533 size_t hbytes = 0;
1534 size_t tbytes;
1535 off_t hdtr_size = 0;
1536 off_t sbytes;
1537 int error;
1539 KKASSERT(p);
1542 * Do argument checking. Must be a regular file in, stream
1543 * type and connected socket out, positive offset.
1545 fp = holdfp(p->p_fd, uap->fd, FREAD);
1546 if (fp == NULL) {
1547 return (EBADF);
1549 if (fp->f_type != DTYPE_VNODE) {
1550 fdrop(fp);
1551 return (EINVAL);
1553 vp = (struct vnode *)fp->f_data;
1554 vref(vp);
1555 fdrop(fp);
1558 * If specified, get the pointer to the sf_hdtr struct for
1559 * any headers/trailers.
1561 if (uap->hdtr) {
1562 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1563 if (error)
1564 goto done;
1566 * Send any headers.
1568 if (hdtr.headers) {
1569 error = iovec_copyin(hdtr.headers, &iov, aiov,
1570 hdtr.hdr_cnt, &hbytes);
1571 if (error)
1572 goto done;
1573 auio.uio_iov = iov;
1574 auio.uio_iovcnt = hdtr.hdr_cnt;
1575 auio.uio_offset = 0;
1576 auio.uio_segflg = UIO_USERSPACE;
1577 auio.uio_rw = UIO_WRITE;
1578 auio.uio_td = td;
1579 auio.uio_resid = hbytes;
1581 mheader = m_uiomove(&auio);
1583 iovec_free(&iov, aiov);
1584 if (mheader == NULL)
1585 goto done;
1589 error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1590 &sbytes, uap->flags);
1591 if (error)
1592 goto done;
1595 * Send trailers. Wimp out and use writev(2).
1597 if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1598 error = iovec_copyin(hdtr.trailers, &iov, aiov,
1599 hdtr.trl_cnt, &auio.uio_resid);
1600 if (error)
1601 goto done;
1602 auio.uio_iov = iov;
1603 auio.uio_iovcnt = hdtr.trl_cnt;
1604 auio.uio_offset = 0;
1605 auio.uio_segflg = UIO_USERSPACE;
1606 auio.uio_rw = UIO_WRITE;
1607 auio.uio_td = td;
1609 tbytes = 0; /* avoid gcc warnings */
1610 error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1612 iovec_free(&iov, aiov);
1613 if (error)
1614 goto done;
1615 hdtr_size += tbytes; /* trailer bytes successfully sent */
1618 done:
1619 if (vp)
1620 vrele(vp);
1621 if (uap->sbytes != NULL) {
1622 sbytes += hdtr_size;
1623 copyout(&sbytes, uap->sbytes, sizeof(off_t));
1625 return (error);
1629 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1630 struct mbuf *mheader, off_t *sbytes, int flags)
1632 struct thread *td = curthread;
1633 struct proc *p = td->td_proc;
1634 struct vm_object *obj;
1635 struct socket *so;
1636 struct file *fp;
1637 struct mbuf *m, *mp;
1638 struct sf_buf *sf;
1639 struct vm_page *pg;
1640 off_t off, xfsize, xbytes;
1641 off_t hbytes = 0;
1642 int error = 0;
1644 if (vp->v_type != VREG) {
1645 error = EINVAL;
1646 goto done0;
1648 if ((obj = vp->v_object) == NULL) {
1649 error = EINVAL;
1650 goto done0;
1652 error = holdsock(p->p_fd, sfd, &fp);
1653 if (error)
1654 goto done0;
1655 so = (struct socket *)fp->f_data;
1656 if (so->so_type != SOCK_STREAM) {
1657 error = EINVAL;
1658 goto done;
1660 if ((so->so_state & SS_ISCONNECTED) == 0) {
1661 error = ENOTCONN;
1662 goto done;
1664 if (offset < 0) {
1665 error = EINVAL;
1666 goto done;
1670 * preallocation is required for asynchronous passing of mbufs,
1671 * otherwise we can wind up building up an infinite number of
1672 * mbufs during the asynchronous latency.
1674 if ((so->so_snd.ssb_flags & (SSB_PREALLOC | SSB_STOPSUPP)) == 0) {
1675 error = EINVAL;
1676 goto done;
1679 *sbytes = 0;
1680 xbytes = 0;
1682 * Protect against multiple writers to the socket.
1684 ssb_lock(&so->so_snd, M_WAITOK);
1687 * Loop through the pages in the file, starting with the requested
1688 * offset. Get a file page (do I/O if necessary), map the file page
1689 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1690 * it on the socket.
1692 for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes, xbytes += xfsize) {
1693 vm_pindex_t pindex;
1694 vm_offset_t pgoff;
1695 long space;
1697 pindex = OFF_TO_IDX(off);
1698 retry_lookup:
1700 * Calculate the amount to transfer. Not to exceed a page,
1701 * the EOF, or the passed in nbytes.
1703 xfsize = vp->v_filesize - off;
1704 if (xfsize > PAGE_SIZE)
1705 xfsize = PAGE_SIZE;
1706 pgoff = (vm_offset_t)(off & PAGE_MASK);
1707 if (PAGE_SIZE - pgoff < xfsize)
1708 xfsize = PAGE_SIZE - pgoff;
1709 if (nbytes && xfsize > (nbytes - xbytes))
1710 xfsize = nbytes - xbytes;
1711 if (xfsize <= 0)
1712 break;
1714 * Optimize the non-blocking case by looking at the socket space
1715 * before going to the extra work of constituting the sf_buf.
1717 if (so->so_snd.ssb_flags & SSB_PREALLOC)
1718 space = ssb_space_prealloc(&so->so_snd);
1719 else
1720 space = ssb_space(&so->so_snd);
1722 if ((fp->f_flag & FNONBLOCK) && space <= 0) {
1723 if (so->so_state & SS_CANTSENDMORE)
1724 error = EPIPE;
1725 else
1726 error = EAGAIN;
1727 ssb_unlock(&so->so_snd);
1728 goto done;
1731 * Attempt to look up the page.
1733 * Allocate if not found, wait and loop if busy, then hold the page.
1734 * We hold rather than wire the page because we do not want to prevent
1735 * filesystem truncation operations from occurring on the file. This
1736 * can happen even under normal operation if the file being sent is
1737 * remove()d after the sendfile() call completes, because the socket buffer
1738 * may still be draining. tmpfs will crash if we try to use wire.
1740 vm_object_hold(obj);
1741 pg = vm_page_lookup_busy_try(obj, pindex, TRUE, &error);
1742 if (error) {
1743 vm_page_sleep_busy(pg, TRUE, "sfpbsy");
1744 vm_object_drop(obj);
1745 goto retry_lookup;
1747 if (pg == NULL) {
1748 pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL |
1749 VM_ALLOC_NULL_OK);
1750 if (pg == NULL) {
1751 vm_wait(0);
1752 vm_object_drop(obj);
1753 goto retry_lookup;
1756 vm_page_hold(pg);
1757 vm_object_drop(obj);
1760 * If page is not valid for what we need, initiate I/O
1763 if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1764 struct uio auio;
1765 struct iovec aiov;
1766 int bsize;
1769 * Ensure that our page is still around when the I/O
1770 * completes.
1772 * Ensure that our page is not modified while part of
1773 * a mbuf as this could mess up tcp checksums, DMA,
1774 * etc (XXX NEEDS WORK). The softbusy is supposed to
1775 * help here but it actually doesn't.
1777 * XXX THIS HAS MULTIPLE PROBLEMS. The underlying
1778 * VM pages are not protected by the soft-busy
1779 * unless we vm_page_protect... READ them, and
1780 * they STILL aren't protected against
1781 * modification via the buffer cache (VOP_WRITE).
1783 * Fixing the second issue is particularly
1784 * difficult.
1786 * XXX We also can't soft-busy anyway because it can
1787 * deadlock against the syncer doing a vfs_msync(),
1788 * vfs_msync->vmntvnodescan->vfs_msync_scan2->
1789 * vm_object_page_clean->(scan)-> ... page
1790 * busy-wait.
1792 /*vm_page_io_start(pg);*/
1793 vm_page_wakeup(pg);
1796 * Get the page from backing store.
1798 bsize = vp->v_mount->mnt_stat.f_iosize;
1799 auio.uio_iov = &aiov;
1800 auio.uio_iovcnt = 1;
1801 aiov.iov_base = 0;
1802 aiov.iov_len = MAXBSIZE;
1803 auio.uio_resid = MAXBSIZE;
1804 auio.uio_offset = trunc_page(off);
1805 auio.uio_segflg = UIO_NOCOPY;
1806 auio.uio_rw = UIO_READ;
1807 auio.uio_td = td;
1808 vn_lock(vp, LK_SHARED | LK_RETRY);
1809 error = VOP_READ(vp, &auio,
1810 IO_VMIO | ((MAXBSIZE / bsize) << 16),
1811 td->td_ucred);
1812 vn_unlock(vp);
1813 vm_page_busy_wait(pg, FALSE, "sockpg");
1814 /*vm_page_io_finish(pg);*/
1815 if (error) {
1816 vm_page_wakeup(pg);
1817 vm_page_unhold(pg);
1818 /* vm_page_try_to_free(pg); */
1819 ssb_unlock(&so->so_snd);
1820 goto done;
1826 * Get a sendfile buf. We usually wait as long as necessary,
1827 * but this wait can be interrupted.
1829 if ((sf = sf_buf_alloc(pg)) == NULL) {
1830 vm_page_wakeup(pg);
1831 vm_page_unhold(pg);
1832 /* vm_page_try_to_free(pg); */
1833 ssb_unlock(&so->so_snd);
1834 error = EINTR;
1835 goto done;
1839 * Get an mbuf header and set it up as having external storage.
1841 MGETHDR(m, M_WAITOK, MT_DATA);
1842 if (m == NULL) {
1843 error = ENOBUFS;
1844 vm_page_wakeup(pg);
1845 vm_page_unhold(pg);
1846 /* vm_page_try_to_free(pg); */
1847 sf_buf_free(sf);
1848 ssb_unlock(&so->so_snd);
1849 goto done;
1852 vm_page_wakeup(pg);
1854 m->m_ext.ext_free = sf_buf_mfree;
1855 m->m_ext.ext_ref = sf_buf_ref;
1856 m->m_ext.ext_arg = sf;
1857 m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
1858 m->m_ext.ext_size = PAGE_SIZE;
1859 m->m_data = (char *)sf_buf_kva(sf) + pgoff;
1860 m->m_flags |= M_EXT;
1861 m->m_pkthdr.len = m->m_len = xfsize;
1862 KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1864 if (mheader != NULL) {
1865 hbytes = mheader->m_pkthdr.len;
1866 mheader->m_pkthdr.len += m->m_pkthdr.len;
1867 m_cat(mheader, m);
1868 m = mheader;
1869 mheader = NULL;
1870 } else
1871 hbytes = 0;
1874 * Add the buffer to the socket buffer chain.
1876 crit_enter();
1877 retry_space:
1879 * Make sure that the socket is still able to take more data.
1880 * CANTSENDMORE being true usually means that the connection
1881 * was closed. so_error is true when an error was sensed after
1882 * a previous send.
1883 * The state is checked after the page mapping and buffer
1884 * allocation above since those operations may block and make
1885 * any socket checks stale. From this point forward, nothing
1886 * blocks before the pru_send (or more accurately, any blocking
1887 * results in a loop back to here to re-check).
1889 if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1890 if (so->so_state & SS_CANTSENDMORE) {
1891 error = EPIPE;
1892 } else {
1893 error = so->so_error;
1894 so->so_error = 0;
1896 m_freem(m);
1897 ssb_unlock(&so->so_snd);
1898 crit_exit();
1899 goto done;
1902 * Wait for socket space to become available. We do this just
1903 * after checking the connection state above in order to avoid
1904 * a race condition with ssb_wait().
1906 if (so->so_snd.ssb_flags & SSB_PREALLOC)
1907 space = ssb_space_prealloc(&so->so_snd);
1908 else
1909 space = ssb_space(&so->so_snd);
1911 if (space < m->m_pkthdr.len && space < so->so_snd.ssb_lowat) {
1912 if (fp->f_flag & FNONBLOCK) {
1913 m_freem(m);
1914 ssb_unlock(&so->so_snd);
1915 crit_exit();
1916 error = EAGAIN;
1917 goto done;
1919 error = ssb_wait(&so->so_snd);
1921 * An error from ssb_wait usually indicates that we've
1922 * been interrupted by a signal. If we've sent anything
1923 * then return bytes sent, otherwise return the error.
1925 if (error) {
1926 m_freem(m);
1927 ssb_unlock(&so->so_snd);
1928 crit_exit();
1929 goto done;
1931 goto retry_space;
1934 if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1935 for (mp = m; mp != NULL; mp = mp->m_next)
1936 ssb_preallocstream(&so->so_snd, mp);
1938 if (use_sendfile_async)
1939 error = so_pru_senda(so, 0, m, NULL, NULL, td);
1940 else
1941 error = so_pru_send(so, 0, m, NULL, NULL, td);
1943 crit_exit();
1944 if (error) {
1945 ssb_unlock(&so->so_snd);
1946 goto done;
1949 if (mheader != NULL) {
1950 *sbytes += mheader->m_pkthdr.len;
1952 if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1953 for (mp = mheader; mp != NULL; mp = mp->m_next)
1954 ssb_preallocstream(&so->so_snd, mp);
1956 if (use_sendfile_async)
1957 error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
1958 else
1959 error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1961 mheader = NULL;
1963 ssb_unlock(&so->so_snd);
1965 done:
1966 fdrop(fp);
1967 done0:
1968 if (mheader != NULL)
1969 m_freem(mheader);
1970 return (error);