MFC rev 1.89:
[dragonfly.git] / sys / kern / uipc_syscalls.c
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
36 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
37 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
38 * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.88.2.1 2008/07/26 15:42:06 sephe Exp $
39 */
41 #include "opt_ktrace.h"
42 #include "opt_sctp.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/sysproto.h>
48 #include <sys/malloc.h>
49 #include <sys/filedesc.h>
50 #include <sys/event.h>
51 #include <sys/proc.h>
52 #include <sys/fcntl.h>
53 #include <sys/file.h>
54 #include <sys/filio.h>
55 #include <sys/kern_syscall.h>
56 #include <sys/mbuf.h>
57 #include <sys/protosw.h>
58 #include <sys/sfbuf.h>
59 #include <sys/socket.h>
60 #include <sys/socketvar.h>
61 #include <sys/socketops.h>
62 #include <sys/uio.h>
63 #include <sys/vnode.h>
64 #include <sys/lock.h>
65 #include <sys/mount.h>
66 #ifdef KTRACE
67 #include <sys/ktrace.h>
68 #endif
69 #include <vm/vm.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_pageout.h>
73 #include <vm/vm_kern.h>
74 #include <vm/vm_extern.h>
75 #include <sys/file2.h>
76 #include <sys/signalvar.h>
77 #include <sys/serialize.h>
79 #include <sys/thread2.h>
80 #include <sys/msgport2.h>
81 #include <sys/socketvar2.h>
82 #include <net/netmsg2.h>
84 #ifdef SCTP
85 #include <netinet/sctp_peeloff.h>
86 #endif /* SCTP */
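/*
 * Reference-counted wrapper around an sf_buf.  kern_sendfile() hands one
 * of these to each mbuf as external storage (via sf_buf_mref() and
 * sf_buf_mfree() below) so the underlying wired page is only released
 * when the last mbuf reference goes away.
 */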
88 struct sfbuf_mref {
89 struct sf_buf *sf;
90 int mref_count;
91 struct lwkt_serialize serializer;
92 };
94 static MALLOC_DEFINE(M_SENDFILE, "sendfile", "sendfile sfbuf ref structures");
96 /*
97 * System call interface to the socket abstraction.
98 */
100 extern struct fileops socketops;
102 /*
103 * socket_args(int domain, int type, int protocol)
104 */
105 int
106 kern_socket(int domain, int type, int protocol, int *res)
107 {
108 struct thread *td = curthread;
109 struct proc *p = td->td_proc;
110 struct socket *so;
111 struct file *fp;
112 int fd, error;
114 KKASSERT(p);
116 error = falloc(p, &fp, &fd);
117 if (error)
118 return (error);
119 error = socreate(domain, &so, type, protocol, td);
120 if (error) {
121 fsetfd(p, NULL, fd);
122 } else {
123 fp->f_type = DTYPE_SOCKET;
124 fp->f_flag = FREAD | FWRITE;
125 fp->f_ops = &socketops;
126 fp->f_data = so;
127 *res = fd;
128 fsetfd(p, fp, fd);
129 }
130 fdrop(fp);
131 return (error);
132 }
134 int
135 sys_socket(struct socket_args *uap)
136 {
137 int error;
139 error = kern_socket(uap->domain, uap->type, uap->protocol,
140 &uap->sysmsg_result);
142 return (error);
146 kern_bind(int s, struct sockaddr *sa)
148 struct thread *td = curthread;
149 struct proc *p = td->td_proc;
150 struct file *fp;
151 int error;
153 KKASSERT(p);
154 error = holdsock(p->p_fd, s, &fp);
155 if (error)
156 return (error);
157 error = sobind((struct socket *)fp->f_data, sa, td);
158 fdrop(fp);
159 return (error);
163 * bind_args(int s, caddr_t name, int namelen)
166 sys_bind(struct bind_args *uap)
168 struct sockaddr *sa;
169 int error;
171 error = getsockaddr(&sa, uap->name, uap->namelen);
172 if (error)
173 return (error);
174 error = kern_bind(uap->s, sa);
175 FREE(sa, M_SONAME);
177 return (error);
181 kern_listen(int s, int backlog)
183 struct thread *td = curthread;
184 struct proc *p = td->td_proc;
185 struct file *fp;
186 int error;
188 KKASSERT(p);
189 error = holdsock(p->p_fd, s, &fp);
190 if (error)
191 return (error);
192 error = solisten((struct socket *)fp->f_data, backlog, td);
193 fdrop(fp);
194 return(error);
198 * listen_args(int s, int backlog)
201 sys_listen(struct listen_args *uap)
203 int error;
205 error = kern_listen(uap->s, uap->backlog);
206 return (error);
207 }
209 /*
210 * Returns the accepted socket as well.
211 */
212 static boolean_t
213 soaccept_predicate(struct netmsg *msg0)
214 {
215 struct netmsg_so_notify *msg = (struct netmsg_so_notify *)msg0;
216 struct socket *head = msg->nm_so;
218 if (head->so_error != 0) {
219 msg->nm_netmsg.nm_lmsg.ms_error = head->so_error;
220 return (TRUE);
222 if (!TAILQ_EMPTY(&head->so_comp)) {
223 /* Abuse nm_so field as copy in/copy out parameter. XXX JH */
224 msg->nm_so = TAILQ_FIRST(&head->so_comp);
225 TAILQ_REMOVE(&head->so_comp, msg->nm_so, so_list);
226 head->so_qlen--;
228 msg->nm_netmsg.nm_lmsg.ms_error = 0;
229 return (TRUE);
231 if (head->so_state & SS_CANTRCVMORE) {
232 msg->nm_netmsg.nm_lmsg.ms_error = ECONNABORTED;
233 return (TRUE);
235 if (msg->nm_fflags & FNONBLOCK) {
236 msg->nm_netmsg.nm_lmsg.ms_error = EWOULDBLOCK;
237 return (TRUE);
240 return (FALSE);
241 }
243 /*
244 * The second argument to kern_accept() is a handle to a struct sockaddr.
245 * This allows kern_accept() to return a pointer to an allocated struct
246 * sockaddr which must be freed later with FREE(). The caller must
247 * initialize *name to NULL.
248 */
249 int
250 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res)
251 {
252 struct thread *td = curthread;
253 struct proc *p = td->td_proc;
254 struct file *lfp = NULL;
255 struct file *nfp = NULL;
256 struct sockaddr *sa;
257 struct socket *head, *so;
258 struct netmsg_so_notify msg;
259 lwkt_port_t port;
260 int fd;
261 u_int fflag; /* type must match fp->f_flag */
262 int error, tmp;
264 *res = -1;
265 if (name && namelen && *namelen < 0)
266 return (EINVAL);
268 error = holdsock(p->p_fd, s, &lfp);
269 if (error)
270 return (error);
272 error = falloc(p, &nfp, &fd);
273 if (error) { /* Probably ran out of file descriptors. */
274 fdrop(lfp);
275 return (error);
277 head = (struct socket *)lfp->f_data;
278 if ((head->so_options & SO_ACCEPTCONN) == 0) {
279 error = EINVAL;
280 goto done;
283 if (fflags & O_FBLOCKING)
284 fflags |= lfp->f_flag & ~FNONBLOCK;
285 else if (fflags & O_FNONBLOCKING)
286 fflags |= lfp->f_flag | FNONBLOCK;
287 else
288 fflags = lfp->f_flag;
290 /* optimize for uniprocessor case later XXX JH */
291 port = head->so_proto->pr_mport(head, NULL, NULL, PRU_PRED);
292 netmsg_init_abortable(&msg.nm_netmsg, &curthread->td_msgport,
294 netmsg_so_notify,
295 netmsg_so_notify_doabort);
296 msg.nm_predicate = soaccept_predicate;
297 msg.nm_fflags = fflags;
298 msg.nm_so = head;
299 msg.nm_etype = NM_REVENT;
300 error = lwkt_domsg(port, &msg.nm_netmsg.nm_lmsg, PCATCH);
301 if (error)
302 goto done;
305 * At this point we have the connection that's ready to be accepted.
307 so = msg.nm_so;
309 fflag = lfp->f_flag;
311 /* connection has been removed from the listen queue */
312 KNOTE(&head->so_rcv.ssb_sel.si_note, 0);
314 so->so_state &= ~SS_COMP;
315 so->so_head = NULL;
316 if (head->so_sigio != NULL)
317 fsetown(fgetown(head->so_sigio), &so->so_sigio);
319 nfp->f_type = DTYPE_SOCKET;
320 nfp->f_flag = fflag;
321 nfp->f_ops = &socketops;
322 nfp->f_data = so;
323 /* Sync socket nonblocking/async state with file flags */
324 tmp = fflag & FNONBLOCK;
325 (void) fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, p->p_ucred);
326 tmp = fflag & FASYNC;
327 (void) fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, p->p_ucred);
329 sa = NULL;
330 error = soaccept(so, &sa);
333 * Set the returned name and namelen as applicable. Set the returned
334 * namelen to 0 for older code which might ignore the return value
335 * from accept.
337 if (error == 0) {
338 if (sa && name && namelen) {
339 if (*namelen > sa->sa_len)
340 *namelen = sa->sa_len;
341 *name = sa;
342 } else {
343 if (sa)
344 FREE(sa, M_SONAME);
345 }
346 }
348 done:
349 /*
350 * If an error occurred, clear the reserved descriptor, else associate
351 * nfp with it.
352 *
353 * Note that *res is normally ignored if an error is returned but
354 * a syscall message will still have access to the result code.
355 */
356 if (error) {
357 fsetfd(p, NULL, fd);
358 } else {
359 *res = fd;
360 fsetfd(p, nfp, fd);
362 fdrop(nfp);
363 fdrop(lfp);
364 return (error);
368 * accept(int s, caddr_t name, int *anamelen)
371 sys_accept(struct accept_args *uap)
373 struct sockaddr *sa = NULL;
374 int sa_len;
375 int error;
377 if (uap->name) {
378 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
379 if (error)
380 return (error);
382 error = kern_accept(uap->s, 0, &sa, &sa_len, &uap->sysmsg_result);
384 if (error == 0)
385 error = copyout(sa, uap->name, sa_len);
386 if (error == 0) {
387 error = copyout(&sa_len, uap->anamelen,
388 sizeof(*uap->anamelen));
390 if (sa)
391 FREE(sa, M_SONAME);
392 } else {
393 error = kern_accept(uap->s, 0, NULL, 0, &uap->sysmsg_result);
395 return (error);
399 * extaccept(int s, int fflags, caddr_t name, int *anamelen)
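 *
 * Like accept(2), but with per-call O_FBLOCKING/O_FNONBLOCKING flags.
 * Illustrative userland usage (a sketch based on the argument list above,
 * not part of the original source; 'lsock' is a hypothetical listen fd):
 *
 *	struct sockaddr_storage ss;
 *	int sslen = sizeof(ss);
 *	int fd;
 *
 *	fd = extaccept(lsock, O_FNONBLOCKING, (caddr_t)&ss, &sslen);
 *	if (fd < 0 && errno == EWOULDBLOCK)
 *		;	(no completed connection was queued yet)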
400 */
401 int
402 sys_extaccept(struct extaccept_args *uap)
403 {
404 struct sockaddr *sa = NULL;
405 int sa_len;
406 int error;
407 int fflags = uap->flags & O_FMASK;
409 if (uap->name) {
410 error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
411 if (error)
412 return (error);
414 error = kern_accept(uap->s, fflags, &sa, &sa_len, &uap->sysmsg_result);
416 if (error == 0)
417 error = copyout(sa, uap->name, sa_len);
418 if (error == 0) {
419 error = copyout(&sa_len, uap->anamelen,
420 sizeof(*uap->anamelen));
422 if (sa)
423 FREE(sa, M_SONAME);
424 } else {
425 error = kern_accept(uap->s, fflags, NULL, 0, &uap->sysmsg_result);
427 return (error);
428 }
431 /*
432 * Returns TRUE if predicate satisfied.
433 */
434 static boolean_t
435 soconnected_predicate(struct netmsg *msg0)
436 {
437 struct netmsg_so_notify *msg = (struct netmsg_so_notify *)msg0;
438 struct socket *so = msg->nm_so;
440 /* check predicate */
441 if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
442 msg->nm_netmsg.nm_lmsg.ms_error = so->so_error;
443 return (TRUE);
446 return (FALSE);
450 kern_connect(int s, int fflags, struct sockaddr *sa)
452 struct thread *td = curthread;
453 struct proc *p = td->td_proc;
454 struct file *fp;
455 struct socket *so;
456 int error, interrupted = 0;
458 error = holdsock(p->p_fd, s, &fp);
459 if (error)
460 return (error);
461 so = (struct socket *)fp->f_data;
463 if (fflags & O_FBLOCKING)
464 /* fflags &= ~FNONBLOCK; */;
465 else if (fflags & O_FNONBLOCKING)
466 fflags |= FNONBLOCK;
467 else
468 fflags = fp->f_flag;
470 if (so->so_state & SS_ISCONNECTING) {
471 error = EALREADY;
472 goto done;
474 error = soconnect(so, sa, td);
475 if (error)
476 goto bad;
477 if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
478 error = EINPROGRESS;
479 goto done;
481 if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
482 struct netmsg_so_notify msg;
483 lwkt_port_t port;
485 port = so->so_proto->pr_mport(so, sa, NULL, PRU_PRED);
486 netmsg_init_abortable(&msg.nm_netmsg,
487 &curthread->td_msgport,
489 netmsg_so_notify,
490 netmsg_so_notify_doabort);
491 msg.nm_predicate = soconnected_predicate;
492 msg.nm_so = so;
493 msg.nm_etype = NM_REVENT;
494 error = lwkt_domsg(port, &msg.nm_netmsg.nm_lmsg, PCATCH);
495 if (error == EINTR || error == ERESTART)
496 interrupted = 1;
498 if (error == 0) {
499 error = so->so_error;
500 so->so_error = 0;
502 bad:
503 if (!interrupted)
504 so->so_state &= ~SS_ISCONNECTING;
505 if (error == ERESTART)
506 error = EINTR;
507 done:
508 fdrop(fp);
509 return (error);
513 * connect_args(int s, caddr_t name, int namelen)
516 sys_connect(struct connect_args *uap)
518 struct sockaddr *sa;
519 int error;
521 error = getsockaddr(&sa, uap->name, uap->namelen);
522 if (error)
523 return (error);
524 error = kern_connect(uap->s, 0, sa);
525 FREE(sa, M_SONAME);
527 return (error);
531 * connect_args(int s, int fflags, caddr_t name, int namelen)
534 sys_extconnect(struct extconnect_args *uap)
536 struct sockaddr *sa;
537 int error;
538 int fflags = uap->flags & O_FMASK;
540 error = getsockaddr(&sa, uap->name, uap->namelen);
541 if (error)
542 return (error);
543 error = kern_connect(uap->s, fflags, sa);
544 FREE(sa, M_SONAME);
546 return (error);
550 kern_socketpair(int domain, int type, int protocol, int *sv)
552 struct thread *td = curthread;
553 struct proc *p = td->td_proc;
554 struct file *fp1, *fp2;
555 struct socket *so1, *so2;
556 int fd1, fd2, error;
558 KKASSERT(p);
559 error = socreate(domain, &so1, type, protocol, td);
560 if (error)
561 return (error);
562 error = socreate(domain, &so2, type, protocol, td);
563 if (error)
564 goto free1;
565 error = falloc(p, &fp1, &fd1);
566 if (error)
567 goto free2;
568 sv[0] = fd1;
569 fp1->f_data = so1;
570 error = falloc(p, &fp2, &fd2);
571 if (error)
572 goto free3;
573 fp2->f_data = so2;
574 sv[1] = fd2;
575 error = soconnect2(so1, so2);
576 if (error)
577 goto free4;
578 if (type == SOCK_DGRAM) {
580 * Datagram socket connection is asymmetric.
582 error = soconnect2(so2, so1);
583 if (error)
584 goto free4;
586 fp1->f_type = fp2->f_type = DTYPE_SOCKET;
587 fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
588 fp1->f_ops = fp2->f_ops = &socketops;
589 fsetfd(p, fp1, fd1);
590 fsetfd(p, fp2, fd2);
591 fdrop(fp1);
592 fdrop(fp2);
593 return (error);
594 free4:
595 fsetfd(p, NULL, fd2);
596 fdrop(fp2);
597 free3:
598 fsetfd(p, NULL, fd1);
599 fdrop(fp1);
600 free2:
601 (void)soclose(so2, 0);
602 free1:
603 (void)soclose(so1, 0);
604 return (error);
608 * socketpair(int domain, int type, int protocol, int *rsv)
611 sys_socketpair(struct socketpair_args *uap)
613 int error, sockv[2];
615 error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
617 if (error == 0)
618 error = copyout(sockv, uap->rsv, sizeof(sockv));
619 return (error);
623 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
624 struct mbuf *control, int flags, int *res)
626 struct thread *td = curthread;
627 struct lwp *lp = td->td_lwp;
628 struct proc *p = td->td_proc;
629 struct file *fp;
630 int len, error;
631 struct socket *so;
632 #ifdef KTRACE
633 struct iovec *ktriov = NULL;
634 struct uio ktruio;
635 #endif
637 error = holdsock(p->p_fd, s, &fp);
638 if (error)
639 return (error);
640 if (auio->uio_resid < 0) {
641 error = EINVAL;
642 goto done;
644 #ifdef KTRACE
645 if (KTRPOINT(td, KTR_GENIO)) {
646 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
648 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
649 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
650 ktruio = *auio;
652 #endif
653 len = auio->uio_resid;
654 so = (struct socket *)fp->f_data;
655 if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
656 if (fp->f_flag & FNONBLOCK)
657 flags |= MSG_FNONBLOCKING;
659 error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
660 if (error) {
661 if (auio->uio_resid != len && (error == ERESTART ||
662 error == EINTR || error == EWOULDBLOCK))
663 error = 0;
664 if (error == EPIPE)
665 lwpsignal(p, lp, SIGPIPE);
667 #ifdef KTRACE
668 if (ktriov != NULL) {
669 if (error == 0) {
670 ktruio.uio_iov = ktriov;
671 ktruio.uio_resid = len - auio->uio_resid;
672 ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
674 FREE(ktriov, M_TEMP);
676 #endif
677 if (error == 0)
678 *res = len - auio->uio_resid;
679 done:
680 fdrop(fp);
681 return (error);
685 * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
688 sys_sendto(struct sendto_args *uap)
690 struct thread *td = curthread;
691 struct uio auio;
692 struct iovec aiov;
693 struct sockaddr *sa = NULL;
694 int error;
696 if (uap->to) {
697 error = getsockaddr(&sa, uap->to, uap->tolen);
698 if (error)
699 return (error);
701 aiov.iov_base = uap->buf;
702 aiov.iov_len = uap->len;
703 auio.uio_iov = &aiov;
704 auio.uio_iovcnt = 1;
705 auio.uio_offset = 0;
706 auio.uio_resid = uap->len;
707 auio.uio_segflg = UIO_USERSPACE;
708 auio.uio_rw = UIO_WRITE;
709 auio.uio_td = td;
711 error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
712 &uap->sysmsg_result);
714 if (sa)
715 FREE(sa, M_SONAME);
716 return (error);
720 * sendmsg_args(int s, caddr_t msg, int flags)
723 sys_sendmsg(struct sendmsg_args *uap)
725 struct thread *td = curthread;
726 struct msghdr msg;
727 struct uio auio;
728 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
729 struct sockaddr *sa = NULL;
730 struct mbuf *control = NULL;
731 int error;
733 error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
734 if (error)
735 return (error);
738 * Conditionally copyin msg.msg_name.
740 if (msg.msg_name) {
741 error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
742 if (error)
743 return (error);
747 * Populate auio.
749 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
750 &auio.uio_resid);
751 if (error)
752 goto cleanup2;
753 auio.uio_iov = iov;
754 auio.uio_iovcnt = msg.msg_iovlen;
755 auio.uio_offset = 0;
756 auio.uio_segflg = UIO_USERSPACE;
757 auio.uio_rw = UIO_WRITE;
758 auio.uio_td = td;
761 * Conditionally copyin msg.msg_control.
763 if (msg.msg_control) {
764 if (msg.msg_controllen < sizeof(struct cmsghdr) ||
765 msg.msg_controllen > MLEN) {
766 error = EINVAL;
767 goto cleanup;
769 control = m_get(MB_WAIT, MT_CONTROL);
770 if (control == NULL) {
771 error = ENOBUFS;
772 goto cleanup;
774 control->m_len = msg.msg_controllen;
775 error = copyin(msg.msg_control, mtod(control, caddr_t),
776 msg.msg_controllen);
777 if (error) {
778 m_free(control);
779 goto cleanup;
783 error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
784 &uap->sysmsg_result);
786 cleanup:
787 iovec_free(&iov, aiov);
788 cleanup2:
789 if (sa)
790 FREE(sa, M_SONAME);
791 return (error);
795 * kern_recvmsg() takes a handle to sa and control. If the handle is non-
796 * null, it returns a dynamically allocated struct sockaddr and an mbuf.
797 * Don't forget to FREE() and m_free() these if they are returned.
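 *
 * Illustrative calling pattern (a sketch mirroring sys_recvmsg() below):
 *
 *	struct sockaddr *sa = NULL;
 *	struct mbuf *control = NULL;
 *
 *	error = kern_recvmsg(s, &sa, &auio, &control, &flags, &res);
 *	...
 *	if (sa)
 *		FREE(sa, M_SONAME);
 *	if (control)
 *		m_freem(control);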
800 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
801 struct mbuf **control, int *flags, int *res)
803 struct thread *td = curthread;
804 struct proc *p = td->td_proc;
805 struct file *fp;
806 int len, error;
807 int lflags;
808 struct socket *so;
809 #ifdef KTRACE
810 struct iovec *ktriov = NULL;
811 struct uio ktruio;
812 #endif
814 error = holdsock(p->p_fd, s, &fp);
815 if (error)
816 return (error);
817 if (auio->uio_resid < 0) {
818 error = EINVAL;
819 goto done;
821 #ifdef KTRACE
822 if (KTRPOINT(td, KTR_GENIO)) {
823 int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
825 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
826 bcopy(auio->uio_iov, ktriov, iovlen);
827 ktruio = *auio;
829 #endif
830 len = auio->uio_resid;
831 so = (struct socket *)fp->f_data;
833 if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
834 if (fp->f_flag & FNONBLOCK) {
835 if (flags) {
836 *flags |= MSG_FNONBLOCKING;
837 } else {
838 lflags = MSG_FNONBLOCKING;
839 flags = &lflags;
844 error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
845 if (error) {
846 if (auio->uio_resid != len && (error == ERESTART ||
847 error == EINTR || error == EWOULDBLOCK))
848 error = 0;
850 #ifdef KTRACE
851 if (ktriov != NULL) {
852 if (error == 0) {
853 ktruio.uio_iov = ktriov;
854 ktruio.uio_resid = len - auio->uio_resid;
855 ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
857 FREE(ktriov, M_TEMP);
859 #endif
860 if (error == 0)
861 *res = len - auio->uio_resid;
862 done:
863 fdrop(fp);
864 return (error);
868 * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
869 * caddr_t from, int *fromlenaddr)
872 sys_recvfrom(struct recvfrom_args *uap)
874 struct thread *td = curthread;
875 struct uio auio;
876 struct iovec aiov;
877 struct sockaddr *sa = NULL;
878 int error, fromlen;
880 if (uap->from && uap->fromlenaddr) {
881 error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
882 if (error)
883 return (error);
884 if (fromlen < 0)
885 return (EINVAL);
886 } else {
887 fromlen = 0;
889 aiov.iov_base = uap->buf;
890 aiov.iov_len = uap->len;
891 auio.uio_iov = &aiov;
892 auio.uio_iovcnt = 1;
893 auio.uio_offset = 0;
894 auio.uio_resid = uap->len;
895 auio.uio_segflg = UIO_USERSPACE;
896 auio.uio_rw = UIO_READ;
897 auio.uio_td = td;
899 error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
900 &uap->flags, &uap->sysmsg_result);
902 if (error == 0 && uap->from) {
903 /* note: sa may still be NULL */
904 if (sa) {
905 fromlen = MIN(fromlen, sa->sa_len);
906 error = copyout(sa, uap->from, fromlen);
907 } else {
908 fromlen = 0;
910 if (error == 0) {
911 error = copyout(&fromlen, uap->fromlenaddr,
912 sizeof(fromlen));
915 if (sa)
916 FREE(sa, M_SONAME);
918 return (error);
922 * recvmsg_args(int s, struct msghdr *msg, int flags)
925 sys_recvmsg(struct recvmsg_args *uap)
927 struct thread *td = curthread;
928 struct msghdr msg;
929 struct uio auio;
930 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
931 struct mbuf *m, *control = NULL;
932 struct sockaddr *sa = NULL;
933 caddr_t ctlbuf;
934 socklen_t *ufromlenp, *ucontrollenp;
935 int error, fromlen, controllen, len, flags, *uflagsp;
938 * This copyin handles everything except the iovec.
940 error = copyin(uap->msg, &msg, sizeof(msg));
941 if (error)
942 return (error);
944 if (msg.msg_name && msg.msg_namelen < 0)
945 return (EINVAL);
946 if (msg.msg_control && msg.msg_controllen < 0)
947 return (EINVAL);
949 ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
950 msg_namelen));
951 ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
952 msg_controllen));
953 uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
954 msg_flags));
957 * Populate auio.
959 error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
960 &auio.uio_resid);
961 if (error)
962 return (error);
963 auio.uio_iov = iov;
964 auio.uio_iovcnt = msg.msg_iovlen;
965 auio.uio_offset = 0;
966 auio.uio_segflg = UIO_USERSPACE;
967 auio.uio_rw = UIO_READ;
968 auio.uio_td = td;
970 flags = uap->flags;
972 error = kern_recvmsg(uap->s, msg.msg_name ? &sa : NULL, &auio,
973 msg.msg_control ? &control : NULL, &flags, &uap->sysmsg_result);
976 * Conditionally copyout the name and populate the namelen field.
978 if (error == 0 && msg.msg_name) {
979 /* note: sa may still be NULL */
980 if (sa != NULL) {
981 fromlen = MIN(msg.msg_namelen, sa->sa_len);
982 error = copyout(sa, msg.msg_name, fromlen);
983 } else
984 fromlen = 0;
985 if (error == 0)
986 error = copyout(&fromlen, ufromlenp,
987 sizeof(*ufromlenp));
991 * Copyout msg.msg_control and msg.msg_controllen.
993 if (error == 0 && msg.msg_control) {
994 len = msg.msg_controllen;
995 m = control;
996 ctlbuf = (caddr_t)msg.msg_control;
998 while(m && len > 0) {
999 unsigned int tocopy;
1001 if (len >= m->m_len) {
1002 tocopy = m->m_len;
1003 } else {
1004 msg.msg_flags |= MSG_CTRUNC;
1005 tocopy = len;
1008 error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1009 if (error)
1010 goto cleanup;
1012 ctlbuf += tocopy;
1013 len -= tocopy;
1014 m = m->m_next;
1016 controllen = ctlbuf - (caddr_t)msg.msg_control;
1017 error = copyout(&controllen, ucontrollenp,
1018 sizeof(*ucontrollenp));
1021 if (error == 0)
1022 error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1024 cleanup:
1025 if (sa)
1026 FREE(sa, M_SONAME);
1027 iovec_free(&iov, aiov);
1028 if (control)
1029 m_freem(control);
1030 return (error);
1034 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1035 * in kernel pointer instead of a userland pointer. This allows us
1036 * to manipulate socket options in the emulation code.
1039 kern_setsockopt(int s, struct sockopt *sopt)
1041 struct thread *td = curthread;
1042 struct proc *p = td->td_proc;
1043 struct file *fp;
1044 int error;
1046 if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
1047 return (EFAULT);
1048 if (sopt->sopt_valsize < 0)
1049 return (EINVAL);
1051 error = holdsock(p->p_fd, s, &fp);
1052 if (error)
1053 return (error);
1055 error = sosetopt((struct socket *)fp->f_data, sopt);
1056 fdrop(fp);
1057 return (error);
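/*
 * Illustrative use of the sopt_td == NULL convention documented above
 * (a sketch, not taken from this file): emulation code can apply a
 * socket option from a kernel buffer, skipping the userland copyin that
 * sys_setsockopt() performs.
 *
 *	struct sockopt sopt;
 *	int onoff = 1;
 *
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_REUSEADDR;
 *	sopt.sopt_val = &onoff;			(kernel pointer)
 *	sopt.sopt_valsize = sizeof(onoff);
 *	sopt.sopt_td = NULL;			(marks sopt_val as a kernel address)
 *	error = kern_setsockopt(s, &sopt);
 */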
1061 * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1064 sys_setsockopt(struct setsockopt_args *uap)
1066 struct thread *td = curthread;
1067 struct sockopt sopt;
1068 int error;
1070 sopt.sopt_level = uap->level;
1071 sopt.sopt_name = uap->name;
1072 sopt.sopt_valsize = uap->valsize;
1073 sopt.sopt_td = td;
1075 if (uap->val) {
1076 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1077 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1078 if (error)
1079 goto out;
1080 } else {
1081 sopt.sopt_val = NULL;
1083 error = kern_setsockopt(uap->s, &sopt);
1084 if (error)
1085 goto out;
1086 if (uap->val)
1087 error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1088 out:
1089 if (uap->val)
1090 kfree(sopt.sopt_val, M_TEMP);
1091 return(error);
1095 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1096 * in kernel pointer instead of a userland pointer. This allows us
1097 * to manipulate socket options in the emulation code.
1100 kern_getsockopt(int s, struct sockopt *sopt)
1102 struct thread *td = curthread;
1103 struct proc *p = td->td_proc;
1104 struct file *fp;
1105 int error;
1107 if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
1108 return (EFAULT);
1109 if (sopt->sopt_valsize < 0)
1110 return (EINVAL);
1112 error = holdsock(p->p_fd, s, &fp);
1113 if (error)
1114 return (error);
1116 error = sogetopt((struct socket *)fp->f_data, sopt);
1117 fdrop(fp);
1118 return (error);
1122 * getsockopt_Args(int s, int level, int name, caddr_t val, int *avalsize)
1125 sys_getsockopt(struct getsockopt_args *uap)
1127 struct thread *td = curthread;
1128 struct sockopt sopt;
1129 int error, valsize;
1131 if (uap->val) {
1132 error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1133 if (error)
1134 return (error);
1135 if (valsize < 0)
1136 return (EINVAL);
1137 } else {
1138 valsize = 0;
1141 sopt.sopt_level = uap->level;
1142 sopt.sopt_name = uap->name;
1143 sopt.sopt_valsize = valsize;
1144 sopt.sopt_td = td;
1146 if (uap->val) {
1147 sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1148 error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1149 if (error)
1150 goto out;
1151 } else {
1152 sopt.sopt_val = NULL;
1154 error = kern_getsockopt(uap->s, &sopt);
1155 if (error)
1156 goto out;
1157 valsize = sopt.sopt_valsize;
1158 error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1159 if (error)
1160 goto out;
1161 if (uap->val)
1162 error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1163 out:
1164 if (uap->val)
1165 kfree(sopt.sopt_val, M_TEMP);
1166 return (error);
1170 * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1171 * This allows kern_getsockname() to return a pointer to an allocated struct
1172 * sockaddr which must be freed later with FREE(). The caller must
1173 * initialize *name to NULL.
1176 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1178 struct thread *td = curthread;
1179 struct proc *p = td->td_proc;
1180 struct file *fp;
1181 struct socket *so;
1182 struct sockaddr *sa = NULL;
1183 int error;
1185 error = holdsock(p->p_fd, s, &fp);
1186 if (error)
1187 return (error);
1188 if (*namelen < 0) {
1189 fdrop(fp);
1190 return (EINVAL);
1192 so = (struct socket *)fp->f_data;
1193 error = so_pru_sockaddr(so, &sa);
1194 if (error == 0) {
1195 if (sa == 0) {
1196 *namelen = 0;
1197 } else {
1198 *namelen = MIN(*namelen, sa->sa_len);
1199 *name = sa;
1203 fdrop(fp);
1204 return (error);
1208 * getsockname_args(int fdes, caddr_t asa, int *alen)
1210 * Get socket name.
1213 sys_getsockname(struct getsockname_args *uap)
1215 struct sockaddr *sa = NULL;
1216 int error, sa_len;
1218 error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1219 if (error)
1220 return (error);
1222 error = kern_getsockname(uap->fdes, &sa, &sa_len);
1224 if (error == 0)
1225 error = copyout(sa, uap->asa, sa_len);
1226 if (error == 0)
1227 error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1228 if (sa)
1229 FREE(sa, M_SONAME);
1230 return (error);
1234 * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1235 * This allows kern_getpeername() to return a pointer to an allocated struct
1236 * sockaddr which must be freed later with FREE(). The caller must
1237 * initialize *name to NULL.
1240 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1242 struct thread *td = curthread;
1243 struct proc *p = td->td_proc;
1244 struct file *fp;
1245 struct socket *so;
1246 struct sockaddr *sa = NULL;
1247 int error;
1249 error = holdsock(p->p_fd, s, &fp);
1250 if (error)
1251 return (error);
1252 if (*namelen < 0) {
1253 fdrop(fp);
1254 return (EINVAL);
1256 so = (struct socket *)fp->f_data;
1257 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1258 fdrop(fp);
1259 return (ENOTCONN);
1261 error = so_pru_peeraddr(so, &sa);
1262 if (error == 0) {
1263 if (sa == 0) {
1264 *namelen = 0;
1265 } else {
1266 *namelen = MIN(*namelen, sa->sa_len);
1267 *name = sa;
1271 fdrop(fp);
1272 return (error);
1276 * getpeername_args(int fdes, caddr_t asa, int *alen)
1278 * Get name of peer for connected socket.
1281 sys_getpeername(struct getpeername_args *uap)
1283 struct sockaddr *sa = NULL;
1284 int error, sa_len;
1286 error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1287 if (error)
1288 return (error);
1290 error = kern_getpeername(uap->fdes, &sa, &sa_len);
1292 if (error == 0)
1293 error = copyout(sa, uap->asa, sa_len);
1294 if (error == 0)
1295 error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1296 if (sa)
1297 FREE(sa, M_SONAME);
1298 return (error);
1299 }
1301 int
1302 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1303 {
1304 struct sockaddr *sa;
1305 int error;
1307 *namp = NULL;
1308 if (len > SOCK_MAXADDRLEN)
1309 return ENAMETOOLONG;
1310 if (len < offsetof(struct sockaddr, sa_data[0]))
1311 return EDOM;
1312 MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
1313 error = copyin(uaddr, sa, len);
1314 if (error) {
1315 FREE(sa, M_SONAME);
1316 } else {
1317 #if BYTE_ORDER != BIG_ENDIAN
1318 /*
1319 * The bind(), connect(), and sendto() syscalls were not
1320 * versioned for COMPAT_43. Thus, this check must stay.
1321 */
1322 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1323 sa->sa_family = sa->sa_len;
1324 #endif
1325 sa->sa_len = len;
1326 *namp = sa;
1328 return error;
1332 * Detach a mapped page and release resources back to the system.
1333 * We must release our wiring and if the object is ripped out
1334 * from under the vm_page we become responsible for freeing the
1335 * page. These routines must be MPSAFE.
1337 * XXX HACK XXX TEMPORARY UNTIL WE IMPLEMENT EXT MBUF REFERENCE COUNTING
1339 * XXX vm_page_*() routines are not MPSAFE yet, the MP lock is required.
1341 static void
1342 sf_buf_mref(void *arg)
1344 struct sfbuf_mref *sfm = arg;
1347 * We must already hold a ref so there is no race to 0, just
1348 * atomically increment the count.
1350 atomic_add_int(&sfm->mref_count, 1);
1353 static void
1354 sf_buf_mfree(void *arg)
1356 struct sfbuf_mref *sfm = arg;
1357 vm_page_t m;
1359 KKASSERT(sfm->mref_count > 0);
1360 if (sfm->mref_count == 1) {
1362 * We are the only holder so no further locking is required,
1363 * the sfbuf can simply be freed.
1365 sfm->mref_count = 0;
1366 goto freeit;
1367 } else {
1369 * There may be other holders, we must obtain the serializer
1370 * to protect against a sf_buf_mfree() race to 0. An atomic
1371 * operation is still required for races against
1372 * sf_buf_mref().
1374 * XXX vm_page_*() and SFBUF routines not MPSAFE yet.
1376 lwkt_serialize_enter(&sfm->serializer);
1377 atomic_subtract_int(&sfm->mref_count, 1);
1378 if (sfm->mref_count == 0) {
1379 lwkt_serialize_exit(&sfm->serializer);
1380 freeit:
1381 get_mplock();
1382 crit_enter();
1383 m = sf_buf_page(sfm->sf);
1384 sf_buf_free(sfm->sf);
1385 vm_page_unwire(m, 0);
1386 if (m->wire_count == 0 && m->object == NULL)
1387 vm_page_try_to_free(m);
1388 crit_exit();
1389 rel_mplock();
1390 kfree(sfm, M_SENDFILE);
1391 } else {
1392 lwkt_serialize_exit(&sfm->serializer);
1398 * sendfile(2).
1399 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1400 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1402 * Send a file specified by 'fd' and starting at 'offset' to a socket
1403 * specified by 's'. Send only 'nbytes' of the file or until EOF if
1404 * nbytes == 0. Optionally add a header and/or trailer to the socket
1405 * output. If specified, write the total number of bytes sent into *sbytes.
1407 * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1408 * the headers to count against the remaining bytes to be sent from
1409 * the file descriptor. We may wish to implement a compatibility syscall
1410 * in the future.
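 *
 * Illustrative userland usage (a sketch based on the prototype above, not
 * part of the original source; 'filefd' and 'sockfd' are hypothetical
 * descriptors): send a whole file, i.e. nbytes == 0, over a connected
 * stream socket and report the byte count.
 *
 *	off_t sent = 0;
 *
 *	if (sendfile(filefd, sockfd, 0, 0, NULL, &sent, 0) < 0)
 *		err(1, "sendfile");
 *	printf("sent %jd bytes\n", (intmax_t)sent);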
1411 */
1412 int
1413 sys_sendfile(struct sendfile_args *uap)
1414 {
1415 struct thread *td = curthread;
1416 struct proc *p = td->td_proc;
1417 struct file *fp;
1418 struct vnode *vp = NULL;
1419 struct sf_hdtr hdtr;
1420 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1421 struct uio auio;
1422 struct mbuf *mheader = NULL;
1423 off_t hdtr_size = 0, sbytes;
1424 int error, hbytes = 0, tbytes;
1426 KKASSERT(p);
1429 * Do argument checking. Must be a regular file in, stream
1430 * type and connected socket out, positive offset.
1432 fp = holdfp(p->p_fd, uap->fd, FREAD);
1433 if (fp == NULL) {
1434 return (EBADF);
1436 if (fp->f_type != DTYPE_VNODE) {
1437 fdrop(fp);
1438 return (EINVAL);
1440 vp = (struct vnode *)fp->f_data;
1441 vref(vp);
1442 fdrop(fp);
1445 * If specified, get the pointer to the sf_hdtr struct for
1446 * any headers/trailers.
1448 if (uap->hdtr) {
1449 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1450 if (error)
1451 goto done;
1453 * Send any headers.
1455 if (hdtr.headers) {
1456 error = iovec_copyin(hdtr.headers, &iov, aiov,
1457 hdtr.hdr_cnt, &hbytes);
1458 if (error)
1459 goto done;
1460 auio.uio_iov = iov;
1461 auio.uio_iovcnt = hdtr.hdr_cnt;
1462 auio.uio_offset = 0;
1463 auio.uio_segflg = UIO_USERSPACE;
1464 auio.uio_rw = UIO_WRITE;
1465 auio.uio_td = td;
1466 auio.uio_resid = hbytes;
1468 mheader = m_uiomove(&auio);
1470 iovec_free(&iov, aiov);
1471 if (mheader == NULL)
1472 goto done;
1476 error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1477 &sbytes, uap->flags);
1478 if (error)
1479 goto done;
1482 * Send trailers. Wimp out and use writev(2).
1484 if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1485 error = iovec_copyin(hdtr.trailers, &iov, aiov,
1486 hdtr.trl_cnt, &auio.uio_resid);
1487 if (error)
1488 goto done;
1489 auio.uio_iov = iov;
1490 auio.uio_iovcnt = hdtr.trl_cnt;
1491 auio.uio_offset = 0;
1492 auio.uio_segflg = UIO_USERSPACE;
1493 auio.uio_rw = UIO_WRITE;
1494 auio.uio_td = td;
1496 error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1498 iovec_free(&iov, aiov);
1499 if (error)
1500 goto done;
1501 hdtr_size += tbytes; /* trailer bytes successfully sent */
1504 done:
1505 if (uap->sbytes != NULL) {
1506 sbytes += hdtr_size;
1507 copyout(&sbytes, uap->sbytes, sizeof(off_t));
1509 if (vp)
1510 vrele(vp);
1511 return (error);
1512 }
1514 int
1515 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1516 struct mbuf *mheader, off_t *sbytes, int flags)
1517 {
1518 struct thread *td = curthread;
1519 struct proc *p = td->td_proc;
1520 struct vm_object *obj;
1521 struct socket *so;
1522 struct file *fp;
1523 struct mbuf *m;
1524 struct sf_buf *sf;
1525 struct sfbuf_mref *sfm;
1526 struct vm_page *pg;
1527 off_t off, xfsize;
1528 off_t hbytes = 0;
1529 int error = 0;
1531 if (vp->v_type != VREG) {
1532 error = EINVAL;
1533 goto done0;
1535 if ((obj = vp->v_object) == NULL) {
1536 error = EINVAL;
1537 goto done0;
1539 error = holdsock(p->p_fd, sfd, &fp);
1540 if (error)
1541 goto done0;
1542 so = (struct socket *)fp->f_data;
1543 if (so->so_type != SOCK_STREAM) {
1544 error = EINVAL;
1545 goto done;
1547 if ((so->so_state & SS_ISCONNECTED) == 0) {
1548 error = ENOTCONN;
1549 goto done;
1551 if (offset < 0) {
1552 error = EINVAL;
1553 goto done;
1556 *sbytes = 0;
1558 * Protect against multiple writers to the socket.
1560 ssb_lock(&so->so_snd, M_WAITOK);
1563 * Loop through the pages in the file, starting with the requested
1564 * offset. Get a file page (do I/O if necessary), map the file page
1565 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1566 * it on the socket.
1568 for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes) {
1569 vm_pindex_t pindex;
1570 vm_offset_t pgoff;
1572 pindex = OFF_TO_IDX(off);
1573 retry_lookup:
1575 * Calculate the amount to transfer. Not to exceed a page,
1576 * the EOF, or the passed in nbytes.
1578 xfsize = vp->v_filesize - off;
1579 if (xfsize > PAGE_SIZE)
1580 xfsize = PAGE_SIZE;
1581 pgoff = (vm_offset_t)(off & PAGE_MASK);
1582 if (PAGE_SIZE - pgoff < xfsize)
1583 xfsize = PAGE_SIZE - pgoff;
1584 if (nbytes && xfsize > (nbytes - *sbytes))
1585 xfsize = nbytes - *sbytes;
1586 if (xfsize <= 0)
1587 break;
1589 * Optimize the non-blocking case by looking at the socket space
1590 * before going to the extra work of constituting the sf_buf.
1592 if ((fp->f_flag & FNONBLOCK) && ssb_space(&so->so_snd) <= 0) {
1593 if (so->so_state & SS_CANTSENDMORE)
1594 error = EPIPE;
1595 else
1596 error = EAGAIN;
1597 ssb_unlock(&so->so_snd);
1598 goto done;
1601 * Attempt to look up the page.
1603 * Allocate if not found, wait and loop if busy, then
1604 * wire the page. critical section protection is
1605 * required to maintain the object association (an
1606 * interrupt can free the page) through to the
1607 * vm_page_wire() call.
1609 crit_enter();
1610 pg = vm_page_lookup(obj, pindex);
1611 if (pg == NULL) {
1612 pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
1613 if (pg == NULL) {
1614 vm_wait(0);
1615 crit_exit();
1616 goto retry_lookup;
1618 vm_page_wakeup(pg);
1619 } else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
1620 crit_exit();
1621 goto retry_lookup;
1623 vm_page_wire(pg);
1624 crit_exit();
1627 * If page is not valid for what we need, initiate I/O
1630 if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1631 struct uio auio;
1632 struct iovec aiov;
1633 int bsize;
1636 * Ensure that our page is still around when the I/O
1637 * completes.
1639 vm_page_io_start(pg);
1642 * Get the page from backing store.
1644 bsize = vp->v_mount->mnt_stat.f_iosize;
1645 auio.uio_iov = &aiov;
1646 auio.uio_iovcnt = 1;
1647 aiov.iov_base = 0;
1648 aiov.iov_len = MAXBSIZE;
1649 auio.uio_resid = MAXBSIZE;
1650 auio.uio_offset = trunc_page(off);
1651 auio.uio_segflg = UIO_NOCOPY;
1652 auio.uio_rw = UIO_READ;
1653 auio.uio_td = td;
1654 vn_lock(vp, LK_SHARED | LK_RETRY);
1655 error = VOP_READ(vp, &auio,
1656 IO_VMIO | ((MAXBSIZE / bsize) << 16),
1657 p->p_ucred);
1658 vn_unlock(vp);
1659 vm_page_flag_clear(pg, PG_ZERO);
1660 vm_page_io_finish(pg);
1661 if (error) {
1662 crit_enter();
1663 vm_page_unwire(pg, 0);
1664 vm_page_try_to_free(pg);
1665 crit_exit();
1666 ssb_unlock(&so->so_snd);
1667 goto done;
1673 * Get a sendfile buf. We usually wait as long as necessary,
1674 * but this wait can be interrupted.
1676 if ((sf = sf_buf_alloc(pg, SFB_CATCH)) == NULL) {
1677 crit_enter();
1678 vm_page_unwire(pg, 0);
1679 vm_page_try_to_free(pg);
1680 crit_exit();
1681 ssb_unlock(&so->so_snd);
1682 error = EINTR;
1683 goto done;
1687 * Get an mbuf header and set it up as having external storage.
1689 MGETHDR(m, MB_WAIT, MT_DATA);
1690 if (m == NULL) {
1691 error = ENOBUFS;
1692 sf_buf_free(sf);
1693 ssb_unlock(&so->so_snd);
1694 goto done;
1698 * sfm is a temporary hack, use a per-cpu cache for this.
1700 sfm = kmalloc(sizeof(struct sfbuf_mref), M_SENDFILE, M_WAITOK);
1701 sfm->sf = sf;
1702 sfm->mref_count = 1;
1703 lwkt_serialize_init(&sfm->serializer);
1705 m->m_ext.ext_free = sf_buf_mfree;
1706 m->m_ext.ext_ref = sf_buf_mref;
1707 m->m_ext.ext_arg = sfm;
1708 m->m_ext.ext_buf = (void *)sf->kva;
1709 m->m_ext.ext_size = PAGE_SIZE;
1710 m->m_data = (char *) sf->kva + pgoff;
1711 m->m_flags |= M_EXT;
1712 m->m_pkthdr.len = m->m_len = xfsize;
1713 KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1715 if (mheader != NULL) {
1716 hbytes = mheader->m_pkthdr.len;
1717 mheader->m_pkthdr.len += m->m_pkthdr.len;
1718 m_cat(mheader, m);
1719 m = mheader;
1720 mheader = NULL;
1721 } else
1722 hbytes = 0;
1725 * Add the buffer to the socket buffer chain.
1727 crit_enter();
1728 retry_space:
1730 * Make sure that the socket is still able to take more data.
1731 * CANTSENDMORE being true usually means that the connection
1732 * was closed. so_error is true when an error was sensed after
1733 * a previous send.
1734 * The state is checked after the page mapping and buffer
1735 * allocation above since those operations may block and make
1736 * any socket checks stale. From this point forward, nothing
1737 * blocks before the pru_send (or more accurately, any blocking
1738 * results in a loop back to here to re-check).
1740 if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1741 if (so->so_state & SS_CANTSENDMORE) {
1742 error = EPIPE;
1743 } else {
1744 error = so->so_error;
1745 so->so_error = 0;
1747 m_freem(m);
1748 ssb_unlock(&so->so_snd);
1749 crit_exit();
1750 goto done;
1753 * Wait for socket space to become available. We do this just
1754 * after checking the connection state above in order to avoid
1755 * a race condition with ssb_wait().
1757 if (ssb_space(&so->so_snd) < so->so_snd.ssb_lowat) {
1758 if (fp->f_flag & FNONBLOCK) {
1759 m_freem(m);
1760 ssb_unlock(&so->so_snd);
1761 crit_exit();
1762 error = EAGAIN;
1763 goto done;
1765 error = ssb_wait(&so->so_snd);
1767 * An error from ssb_wait usually indicates that we've
1768 * been interrupted by a signal. If we've sent anything
1769 * then return bytes sent, otherwise return the error.
1771 if (error) {
1772 m_freem(m);
1773 ssb_unlock(&so->so_snd);
1774 crit_exit();
1775 goto done;
1777 goto retry_space;
1779 error = so_pru_send(so, 0, m, NULL, NULL, td);
1780 crit_exit();
1781 if (error) {
1782 ssb_unlock(&so->so_snd);
1783 goto done;
1786 if (mheader != NULL) {
1787 *sbytes += mheader->m_pkthdr.len;
1788 error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1789 mheader = NULL;
1791 ssb_unlock(&so->so_snd);
1793 done:
1794 fdrop(fp);
1795 done0:
1796 if (mheader != NULL)
1797 m_freem(mheader);
1798 return (error);
1802 sys_sctp_peeloff(struct sctp_peeloff_args *uap)
1804 #ifdef SCTP
1805 struct thread *td = curthread;
1806 struct proc *p = td->td_proc;
1807 struct file *lfp = NULL;
1808 struct file *nfp = NULL;
1809 int error;
1810 struct socket *head, *so;
1811 caddr_t assoc_id;
1812 int fd;
1813 short fflag; /* type must match fp->f_flag */
1815 assoc_id = uap->name;
1816 error = holdsock(p->p_fd, uap->sd, &lfp);
1817 if (error) {
1818 return (error);
1820 crit_enter();
1821 head = (struct socket *)lfp->f_data;
1822 error = sctp_can_peel_off(head, assoc_id);
1823 if (error) {
1824 crit_exit();
1825 goto done;
1828 * At this point we know we do have an assoc to pull, so
1829 * we proceed to get the fd set up. This may block,
1830 * but that is ok.
1833 fflag = lfp->f_flag;
1834 error = falloc(p, &nfp, &fd);
1835 if (error) {
1837 * Probably ran out of file descriptors. Put the
1838 * unaccepted connection back onto the queue and
1839 * do another wakeup so some other process might
1840 * have a chance at it.
1842 crit_exit();
1843 goto done;
1845 uap->sysmsg_result = fd;
1847 so = sctp_get_peeloff(head, assoc_id, &error);
1848 if (so == NULL) {
1850 * Either someone else peeled it off OR
1851 * we can't get a socket.
1853 goto noconnection;
1855 so->so_state &= ~SS_COMP;
1856 so->so_state &= ~SS_NOFDREF;
1857 so->so_head = NULL;
1858 if (head->so_sigio != NULL)
1859 fsetown(fgetown(head->so_sigio), &so->so_sigio);
1861 nfp->f_type = DTYPE_SOCKET;
1862 nfp->f_flag = fflag;
1863 nfp->f_ops = &socketops;
1864 nfp->f_data = so;
1866 noconnection:
1868 * Assign the file pointer to the reserved descriptor, or clear
1869 * the reserved descriptor if an error occurred.
1871 if (error)
1872 fsetfd(p, NULL, fd);
1873 else
1874 fsetfd(p, nfp, fd);
1875 crit_exit();
1877 * Release explicitly held references before returning.
1879 done:
1880 if (nfp != NULL)
1881 fdrop(nfp);
1882 fdrop(lfp);
1883 return (error);
1884 #else /* SCTP */
1885 return(EOPNOTSUPP);
1886 #endif /* SCTP */