sys/kern/uipc_msg.c
/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>

#include <net/netisr.h>
#include <net/netmsg.h>

/*
 * Abort a socket and free it. Called from soabort() only. soabort()
 * got a ref on the socket which we must free on reply.
 */
void
so_pru_abort(struct socket *so)
{
        struct netmsg_pru_abort msg;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_abort);
        (void)lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        sofree(msg.base.nm_so);
}

/*
 * Abort a socket and free it, asynchronously. Called from
 * soaborta() only. soaborta() got a ref on the socket which we must
 * free on reply.
 */
void
so_pru_aborta(struct socket *so)
{
        struct netmsg_pru_abort *msg;

        msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
        netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
                    0, so->so_proto->pr_usrreqs->pru_abort);
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

/*
 * Abort a socket and free it. Called from soabort_oncpu() only.
 * Caller must make sure that the current CPU is inpcb's owner CPU.
 */
void
so_pru_abort_oncpu(struct socket *so)
{
        struct netmsg_pru_abort msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        sofree(msg.base.nm_so);
}
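
/*
 * Dispatch pru_accept synchronously to the socket's protocol thread.
 */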
int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_accept msg;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_accept);
        msg.nm_nam = nam;

        return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}
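
/*
 * Dispatch pru_attach synchronously to the socket's protocol thread.
 */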
int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
        struct netmsg_pru_attach msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_attach);
        msg.nm_proto = proto;
        msg.nm_ai = ai;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
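
/*
 * Direct variant of so_pru_attach(): invoke pru_attach synchronously in
 * the current thread rather than forwarding the netmsg to the protocol
 * thread.
 */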
int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
        struct netmsg_pru_attach msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        msg.nm_proto = proto;
        msg.nm_ai = ai;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        return(msg.base.lmsg.ms_error);
}

/*
 * NOTE: If the target port changes the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct netmsg_pru_bind msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_bind);
        msg.nm_nam = nam;
        msg.nm_td = td;         /* used only for prison_ip() */
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
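
/*
 * Dispatch pru_connect synchronously to the socket's protocol thread.
 */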
int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct netmsg_pru_connect msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_connect);
        msg.nm_nam = nam;
        msg.nm_td = td;
        msg.nm_m = NULL;
        msg.nm_flags = 0;
        msg.nm_reconnect = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
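
/*
 * Connect two sockets to each other.  The request is dispatched
 * synchronously via so1's protocol port.
 */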
int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
        struct netmsg_pru_connect2 msg;
        int error;

        netmsg_init(&msg.base, so1, &curthread->td_msgport,
                    0, so1->so_proto->pr_usrreqs->pru_connect2);
        msg.nm_so1 = so1;
        msg.nm_so2 = so2;
        error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
        return (error);
}

/*
 * WARNING!  Synchronous call from user context.  Control function may do
 * copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
                      struct ifnet *ifp)
{
        struct netmsg_pru_control msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        msg.nm_cmd = cmd;
        msg.nm_data = data;
        msg.nm_ifp = ifp;
        msg.nm_td = curthread;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        return(msg.base.lmsg.ms_error);
}
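
/*
 * Dispatch pru_detach synchronously to the socket's protocol thread.
 */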
int
so_pru_detach(struct socket *so)
{
        struct netmsg_pru_detach msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_detach);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
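
/*
 * Direct variant of so_pru_detach(): invoke pru_detach synchronously in
 * the current thread.
 */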
void
so_pru_detach_direct(struct socket *so)
{
        struct netmsg_pru_detach msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}
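
/*
 * Dispatch pru_disconnect synchronously to the socket's protocol thread.
 */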
int
so_pru_disconnect(struct socket *so)
{
        struct netmsg_pru_disconnect msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_disconnect);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
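
/*
 * Direct variant of so_pru_disconnect(): invoke pru_disconnect
 * synchronously in the current thread.
 */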
void
so_pru_disconnect_direct(struct socket *so)
{
        struct netmsg_pru_disconnect msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}
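
/*
 * Dispatch pru_listen synchronously to the socket's protocol thread.
 */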
int
so_pru_listen(struct socket *so, struct thread *td)
{
        struct netmsg_pru_listen msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_listen);
        msg.nm_td = td;         /* used only for prison_ip() XXX JH */
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
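
/*
 * Retrieve the peer address via a synchronous pru_peeraddr request.
 */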
int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_peeraddr msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_peeraddr);
        msg.nm_nam = nam;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
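
/*
 * Notify the protocol that receive buffer space has been consumed
 * (pru_rcvd), synchronously.
 */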
int
so_pru_rcvd(struct socket *so, int flags)
{
        struct netmsg_pru_rcvd msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_rcvd);
        msg.nm_flags = flags;
        msg.nm_pru_flags = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
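
/*
 * Asynchronous version of so_pru_rcvd().  Send the socket's embedded
 * rcvd netmsg to the protocol thread unless it is still in flight or
 * has been marked dead.  The protocol must set PR_ASYNC_RCVD.
 */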
void
so_pru_rcvd_async(struct socket *so)
{
        lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

        KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
            ("async pru_rcvd is not supported"));

        spin_lock(&so->so_rcvd_spin);
        if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
                if (lmsg->ms_flags & MSGF_DONE)
                        lwkt_sendmsg(so->so_port, lmsg);
        } else {
                static int deadlog = 0;

                if (!deadlog) {
                        kprintf("async rcvd is dead\n");
                        deadlog = 1;
                }
        }
        spin_unlock(&so->so_rcvd_spin);
}
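
/*
 * Dispatch pru_rcvoob (receive out-of-band data) synchronously to the
 * socket's protocol thread.
 */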
int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
        struct netmsg_pru_rcvoob msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_rcvoob);
        msg.nm_m = m;
        msg.nm_flags = flags;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

/*
 * NOTE: If the target port changes the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
            struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
        struct netmsg_pru_send msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_send);
        msg.nm_flags = flags;
        msg.nm_m = m;
        msg.nm_addr = addr;
        msg.nm_control = control;
        msg.nm_td = td;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
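
/*
 * Synchronize with the socket's protocol thread by round-tripping a
 * dummy netmsg through its port.
 */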
void
so_pru_sync(struct socket *so)
{
        struct netmsg_base msg;

        netmsg_init(&msg, so, &curthread->td_msgport, 0,
            netmsg_sync_handler);
        lwkt_domsg(so->so_port, &msg.lmsg, 0);
}
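
/*
 * Asynchronous version of so_pru_send().  The netmsg embedded in the
 * mbuf header is used, no reply is generated (PRUS_NOREPLY), and any
 * destination address is copied so the protocol can free it later
 * (PRUS_FREEADDR).  The protocol must set PR_ASYNC_SEND.
 */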
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
        struct netmsg_pru_send *msg;
        struct sockaddr *addr = NULL;

        KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
            ("async pru_send is not supported"));

        flags |= PRUS_NOREPLY;
        if (addr0 != NULL) {
                addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK);
                memcpy(addr, addr0, addr0->sa_len);
                flags |= PRUS_FREEADDR;
        }

        msg = &m->m_hdr.mh_sndmsg;
        netmsg_init(&msg->base, so, &netisr_apanic_rport,
                    0, so->so_proto->pr_usrreqs->pru_send);
        msg->nm_flags = flags;
        msg->nm_m = m;
        msg->nm_addr = addr;
        msg->nm_control = control;
        msg->nm_td = td;
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
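
/*
 * Dispatch pru_sense synchronously to fill in the stat structure.
 */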
int
so_pru_sense(struct socket *so, struct stat *sb)
{
        struct netmsg_pru_sense msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_sense);
        msg.nm_stat = sb;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
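
/*
 * Dispatch pru_shutdown synchronously to the socket's protocol thread.
 */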
int
so_pru_shutdown(struct socket *so)
{
        struct netmsg_pru_shutdown msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_shutdown);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
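
/*
 * Retrieve the local socket address via a synchronous pru_sockaddr
 * request.
 */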
int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_sockaddr msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_sockaddr);
        msg.nm_nam = nam;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
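
/*
 * Issue a socket option request (pr_ctloutput) synchronously in the
 * protocol thread.  The option value must already be in kernel space.
 */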
int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
        struct netmsg_pr_ctloutput msg;
        int error;

        KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_ctloutput);
        msg.nm_sopt = sopt;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return, otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pru_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
        struct netmsg_pru_ctlinput msg;
        lwkt_port_t port;

        if (pr->pr_ctlport == NULL)
                return;
        KKASSERT(pr->pr_ctlinput != NULL);
        port = pr->pr_ctlport(cmd, arg, extra);
        if (port == NULL)
                return;
        netmsg_init(&msg.base, NULL, &curthread->td_msgport,
                    0, pr->pr_ctlinput);
        msg.nm_cmd = cmd;
        msg.nm_arg = arg;
        msg.nm_extra = extra;
        lwkt_domsg(port, &msg.base.lmsg, 0);
}

/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
        struct lwkt_token *tok;
        struct signalsockbuf *ssb;

        ssb = (msg->notify.nm_etype & NM_REVENT) ?
                        &msg->base.nm_so->so_rcv :
                        &msg->base.nm_so->so_snd;

        /*
         * Reply immediately if the event has occurred, otherwise queue the
         * request.
         *
         * NOTE: Socket can change if this is an accept predicate so cache
         *       the token.
         */
        tok = lwkt_token_pool_lookup(msg->base.nm_so);
        lwkt_gettoken(tok);
        if (msg->notify.nm_predicate(&msg->notify)) {
                lwkt_reltoken(tok);
                lwkt_replymsg(&msg->base.lmsg,
                              msg->base.lmsg.ms_error);
        } else {
                TAILQ_INSERT_TAIL(&ssb->ssb_kq.ki_mlist, &msg->notify, nm_list);
                atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
                lwkt_reltoken(tok);
        }
}

/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
        struct netmsg_so_notify_abort msg;

        if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
                netmsg_init(&msg.base, NULL, &curthread->td_msgport,
                            0, netmsg_so_notify_abort);
                msg.nm_notifymsg = (void *)lmsg;
                lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
        }
}

/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
        struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
        struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
        struct signalsockbuf *ssb;

        /*
         * The original notify message is not destroyed until after the
         * abort request is returned, so we can check its state.
         */
        lwkt_getpooltoken(nmsg->base.nm_so);
        if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
                ssb = (nmsg->nm_etype & NM_REVENT) ?
                                &nmsg->base.nm_so->so_rcv :
                                &nmsg->base.nm_so->so_snd;
                TAILQ_REMOVE(&ssb->ssb_kq.ki_mlist, nmsg, nm_list);
                lwkt_relpooltoken(nmsg->base.nm_so);
                lwkt_replymsg(&nmsg->base.lmsg, EINTR);
        } else {
                lwkt_relpooltoken(nmsg->base.nm_so);
        }

        /*
         * Reply to the abort message
         */
        lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}
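
/*
 * Reply to the socket's async rcvd netmsg, making it available for
 * reuse by so_pru_rcvd_async().
 */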
void
so_async_rcvd_reply(struct socket *so)
{
        spin_lock(&so->so_rcvd_spin);
        lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
        spin_unlock(&so->so_rcvd_spin);
}
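
/*
 * Drop the socket's async rcvd netmsg if it is still in flight and mark
 * it dead so it will not be sent again.
 */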
void
so_async_rcvd_drop(struct socket *so)
{
        lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

        spin_lock(&so->so_rcvd_spin);
        if ((lmsg->ms_flags & MSGF_DONE) == 0)
                lwkt_dropmsg(lmsg);
        so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
        spin_unlock(&so->so_rcvd_spin);
}