kernel - Fix excessive call stack depth on stuck interrupt
[dragonfly.git] / sys / kern / uipc_msg.c
blob 713206d7464baaacf43048e607641d5048bdcff8
/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>

#include <net/netmsg2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>

static int async_rcvd_drop_race = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, async_rcvd_drop_race, CTLFLAG_RW,
    &async_rcvd_drop_race, 0, "# of asynchronous pru_rcvd msg drop races");

/*
 * Abort a socket and free it, asynchronously.  Called from
 * soabort_async() only.  soabort_async() got a ref on the
 * socket which we must free on reply.
 */
void
so_pru_abort_async(struct socket *so)
{
        struct netmsg_pru_abort *msg;

        msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
        netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
                    0, so->so_proto->pr_usrreqs->pru_abort);
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

/*
 * Abort a socket and free it.  Called from soabort_direct() only.
 * The caller must make sure that the current CPU is the inpcb's
 * owner CPU.  soabort_direct() got a ref on the socket which we
 * must free.
 */
void
so_pru_abort_direct(struct socket *so)
{
        struct netmsg_pru_abort msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        sofree(msg.base.nm_so);
}
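
/*
 * Synchronously dispatch pru_accept to the socket's protocol thread and
 * wait for the reply; *nam is filled in with the peer address.  The
 * remaining synchronous so_pru_*() wrappers below follow this same
 * build-netmsg-and-lwkt_domsg() pattern.
 */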
int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_accept msg;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_accept);
        msg.nm_nam = nam;

        return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}
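
/*
 * Synchronous pru_attach: the netmsg lives on the caller's stack and
 * the caller blocks in lwkt_domsg() until the protocol thread replies.
 */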
int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
        struct netmsg_pru_attach msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_attach);
        msg.nm_proto = proto;
        msg.nm_ai = ai;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
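
/*
 * Direct pru_attach: invoke the protocol's attach function on the
 * current CPU instead of sending a message.  The MSGF_* fixups make
 * the handler's reply degenerate into a local done-flag update via
 * netisr_adone_rport.
 */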
int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
        struct netmsg_pru_attach msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        msg.nm_proto = proto;
        msg.nm_ai = ai;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        return(msg.base.lmsg.ms_error);
}
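
/*
 * Fast pru_attach: run the protocol's preattach synchronously, then
 * issue the attach itself as an asynchronous netmsg that is freed on
 * reply (netisr_afree_rport).
 */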
int
so_pru_attach_fast(struct socket *so, int proto, struct pru_attach_info *ai)
{
        struct netmsg_pru_attach *msg;
        int error;

        error = so->so_proto->pr_usrreqs->pru_preattach(so, proto, ai);
        if (error)
                return error;

        msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK);
        if (msg == NULL) {
                /*
                 * Failed to allocate the message; fall back to
                 * synchronous pru_attach.
                 */
                return so_pru_attach(so, proto, NULL /* postattach */);
        }

        netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
            so->so_proto->pr_usrreqs->pru_attach);
        msg->nm_proto = proto;
        msg->nm_ai = NULL; /* postattach */
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);

        return 0;
}

/*
 * NOTE: If the target port changes the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct netmsg_pru_bind msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_bind);
        msg.nm_nam = nam;
        msg.nm_td = td;         /* used only for prison_ip() */
        msg.nm_flags = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
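
/*
 * Synchronous pru_connect: dispatch to the protocol thread and wait
 * for the connect to complete or fail.
 */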
int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct netmsg_pru_connect msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_connect);
        msg.nm_nam = nam;
        msg.nm_td = td;
        msg.nm_m = NULL;
        msg.nm_sndflags = 0;
        msg.nm_flags = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
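
/*
 * Asynchronous pru_connect.  The sockaddr is copied into the same
 * allocation as the netmsg so the request is self-contained, and the
 * message is freed on reply via netisr_afree_rport.  Falls back to
 * synchronous so_pru_connect() if the allocation fails.
 */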
int
so_pru_connect_async(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct netmsg_pru_connect *msg;
        int error, flags;

        KASSERT(so->so_proto->pr_usrreqs->pru_preconnect != NULL,
            ("async pru_connect is not supported"));

        /* NOTE: sockaddr immediately follows netmsg */
        msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG,
            M_WAITOK | M_NULLOK);
        if (msg == NULL) {
                /*
                 * Failed to allocate the message; fall back to
                 * synchronous pru_connect.
                 */
                return so_pru_connect(so, nam, td);
        }

        error = so->so_proto->pr_usrreqs->pru_preconnect(so, nam, td);
        if (error) {
                kfree(msg, M_LWKTMSG);
                return error;
        }

        flags = PRUC_ASYNC;
        if (td != NULL && (so->so_proto->pr_flags & PR_ACONN_HOLDTD)) {
                lwkt_hold(td);
                flags |= PRUC_HELDTD;
        }

        netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
            so->so_proto->pr_usrreqs->pru_connect);
        msg->nm_nam = (struct sockaddr *)(msg + 1);
        memcpy(msg->nm_nam, nam, nam->sa_len);
        msg->nm_td = td;
        msg->nm_m = NULL;
        msg->nm_sndflags = 0;
        msg->nm_flags = flags;
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);
        return 0;
}
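
/*
 * Connect two sockets to each other, e.g. for socketpair(2); the
 * message is dispatched to so1's protocol port.
 */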
int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
        struct netmsg_pru_connect2 msg;
        int error;

        netmsg_init(&msg.base, so1, &curthread->td_msgport,
                    0, so1->so_proto->pr_usrreqs->pru_connect2);
        msg.nm_so1 = so1;
        msg.nm_so2 = so2;
        error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
        return (error);
}

/*
 * WARNING! Synchronous call from user context.  Control function may do
 *          copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp)
{
        struct netmsg_pru_control msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        msg.nm_cmd = cmd;
        msg.nm_data = data;
        msg.nm_ifp = ifp;
        msg.nm_td = curthread;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        return(msg.base.lmsg.ms_error);
}
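
/*
 * Synchronous pru_detach: ask the protocol thread to detach the socket
 * from its protocol control block and wait for completion.
 */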
int
so_pru_detach(struct socket *so)
{
        struct netmsg_pru_detach msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_detach);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
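
/*
 * Direct pru_detach: same operation as above, but executed on the
 * current CPU with the hand-rolled MSGF_SYNC dispatch.
 */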
int
so_pru_detach_direct(struct socket *so)
{
        struct netmsg_pru_detach msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        return(msg.base.lmsg.ms_error);
}

int
so_pru_disconnect(struct socket *so)
{
        struct netmsg_pru_disconnect msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_disconnect);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

void
so_pru_disconnect_direct(struct socket *so)
{
        struct netmsg_pru_disconnect msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_listen(struct socket *so, struct thread *td)
{
        struct netmsg_pru_listen msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_listen);
        msg.nm_td = td;         /* used only for prison_ip() XXX JH */
        msg.nm_flags = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_peeraddr msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_peeraddr);
        msg.nm_nam = nam;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_rcvd(struct socket *so, int flags)
{
        struct netmsg_pru_rcvd msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_rcvd);
        msg.nm_flags = flags;
        msg.nm_pru_flags = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
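
/*
 * Asynchronous pru_rcvd using the socket's pre-allocated so_rcvd_msg.
 * The message is only resent once it is marked MSGF_DONE; otherwise a
 * previous notification is still in flight and this one is coalesced
 * with it.
 */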
void
so_pru_rcvd_async(struct socket *so)
{
        lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

        KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
            ("async pru_rcvd is not supported"));

        /*
         * WARNING! Spinlock is a bit dodgy, use hacked up sendmsg
         *          to avoid deadlocking.
         */
        spin_lock(&so->so_rcvd_spin);
        if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
                if (lmsg->ms_flags & MSGF_DONE) {
                        lwkt_sendmsg_prepare(so->so_port, lmsg);
                        spin_unlock(&so->so_rcvd_spin);
                        lwkt_sendmsg_start(so->so_port, lmsg);
                } else {
                        spin_unlock(&so->so_rcvd_spin);
                }
        } else {
                spin_unlock(&so->so_rcvd_spin);
        }
}

int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
        struct netmsg_pru_rcvoob msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_rcvoob);
        msg.nm_m = m;
        msg.nm_flags = flags;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

/*
 * NOTE: If the target port changes the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
        struct netmsg_pru_send msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_send);
        msg.nm_flags = flags;
        msg.nm_m = m;
        msg.nm_addr = addr;
        msg.nm_control = control;
        msg.nm_td = td;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
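
/*
 * Synchronize with the protocol thread by round-tripping a dummy
 * message through its port.
 */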
void
so_pru_sync(struct socket *so)
{
        struct netmsg_base msg;

        netmsg_init(&msg, so, &curthread->td_msgport, 0,
            netmsg_sync_handler);
        lwkt_domsg(so->so_port, &msg.lmsg, 0);
}
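
/*
 * Asynchronous pru_send.  The netmsg is embedded in the mbuf header
 * (m_hdr.mh_sndmsg) so no allocation is needed for the message itself;
 * only the sockaddr, if any, is copied.  PRUS_NOREPLY indicates that
 * no reply is expected (the reply port is netisr_apanic_rport).
 */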
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
        struct netmsg_pru_send *msg;
        struct sockaddr *addr = NULL;

        KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
            ("async pru_send is not supported"));

        if (addr0 != NULL) {
                addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK | M_NULLOK);
                if (addr == NULL) {
                        /*
                         * Failed to allocate the address copy; fall back
                         * to synchronous pru_send.
                         */
                        so_pru_send(so, flags, m, addr0, control, td);
                        return;
                }
                memcpy(addr, addr0, addr0->sa_len);
                flags |= PRUS_FREEADDR;
        }
        flags |= PRUS_NOREPLY;

        if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
                lwkt_hold(td);
                flags |= PRUS_HELDTD;
        }

        msg = &m->m_hdr.mh_sndmsg;
        netmsg_init(&msg->base, so, &netisr_apanic_rport,
                    0, so->so_proto->pr_usrreqs->pru_send);
        msg->nm_flags = flags;
        msg->nm_m = m;
        msg->nm_addr = addr;
        msg->nm_control = control;
        msg->nm_td = td;
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

int
so_pru_sense(struct socket *so, struct stat *sb)
{
        struct netmsg_pru_sense msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_sense);
        msg.nm_stat = sb;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_shutdown(struct socket *so)
{
        struct netmsg_pru_shutdown msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_shutdown);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_sockaddr msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_usrreqs->pru_sockaddr);
        msg.nm_nam = nam;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
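
/*
 * Protocol control output (sockopt processing).  SOPT_SET requests may
 * take an asynchronous fast path when the protocol supplies a
 * pr_ctloutmsg constructor; everything else is dispatched synchronously.
 */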
int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
        struct netmsg_pr_ctloutput msg;
        int error;

        KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));

        if (sopt->sopt_dir == SOPT_SET && so->so_proto->pr_ctloutmsg != NULL) {
                struct netmsg_pr_ctloutput *amsg;

                /* Fast path: asynchronous pr_ctloutput */
                amsg = so->so_proto->pr_ctloutmsg(sopt);
                if (amsg != NULL) {
                        netmsg_init(&amsg->base, so, &netisr_afree_rport, 0,
                            so->so_proto->pr_ctloutput);
                        /* nm_flags and nm_sopt are set up by pr_ctloutmsg */
                        lwkt_sendmsg(so->so_port, &amsg->base.lmsg);
                        return 0;
                }
                /* FALLTHROUGH */
        }

        netmsg_init(&msg.base, so, &curthread->td_msgport,
                    0, so->so_proto->pr_ctloutput);
        msg.nm_flags = 0;
        msg.nm_sopt = sopt;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}
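
/*
 * Resolve the lwkt port that should process a control input message
 * for this protocol, or NULL if the protocol has no pr_ctlport.
 */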
struct lwkt_port *
so_pr_ctlport(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra, int *cpuid)
{
        if (pr->pr_ctlport == NULL)
                return NULL;
        KKASSERT(pr->pr_ctlinput != NULL);

        return pr->pr_ctlport(cmd, arg, extra, cpuid);
}

/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return, otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pr_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
        struct netmsg_pr_ctlinput msg;
        lwkt_port_t port;
        int cpuid;

        port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
        if (port == NULL)
                return;
        netmsg_init(&msg.base, NULL, &curthread->td_msgport,
                    0, pr->pr_ctlinput);
        msg.nm_cmd = cmd;
        msg.nm_direct = 0;
        msg.nm_arg = arg;
        msg.nm_extra = extra;
        lwkt_domsg(port, &msg.base.lmsg, 0);
}
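
/*
 * Direct version of so_pr_ctlinput(): invoke pr_ctlinput on the
 * current CPU when the resolved cpuid matches it (cpuid == ncpus is
 * treated as a wildcard here), using the synchronous MSGF_SYNC
 * dispatch instead of a message send.
 */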
void
so_pr_ctlinput_direct(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra)
{
        struct netmsg_pr_ctlinput msg;
        netisr_fn_t func;
        lwkt_port_t port;
        int cpuid;

        port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
        if (port == NULL)
                return;
        if (cpuid != ncpus && cpuid != mycpuid)
                return;

        func = pr->pr_ctlinput;
        netmsg_init(&msg.base, NULL, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        msg.nm_cmd = cmd;
        msg.nm_direct = 1;
        msg.nm_arg = arg;
        msg.nm_extra = extra;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
        struct socket *so = msg->base.nm_so;
        struct signalsockbuf *ssb;

        ssb = (msg->notify.nm_etype & NM_REVENT) ? &so->so_rcv : &so->so_snd;

        /*
         * Reply immediately if the event has occurred, otherwise queue the
         * request.
         *
         * NOTE: Socket can change if this is an accept predicate so cache
         *       the token.
         */
        lwkt_getpooltoken(so);
        atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
        if (msg->notify.nm_predicate(&msg->notify)) {
                if (TAILQ_EMPTY(&ssb->ssb_mlist))
                        atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
                lwkt_relpooltoken(so);
                lwkt_replymsg(&msg->base.lmsg,
                              msg->base.lmsg.ms_error);
        } else {
                TAILQ_INSERT_TAIL(&ssb->ssb_mlist, &msg->notify, nm_list);
                /*
                 * NOTE:
                 * If the predicate ever blocks, the pool token will be
                 * released, so the SSB_MEVENT set beforehand could have
                 * been cleared by the time we get here.  In case that
                 * happens, set SSB_MEVENT again after the notify has
                 * been queued.
                 */
                atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
                lwkt_relpooltoken(so);
        }
}

/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
        struct netmsg_so_notify_abort msg;

        if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
                const struct netmsg_base *nmsg =
                    (const struct netmsg_base *)lmsg;

                netmsg_init(&msg.base, nmsg->nm_so, &curthread->td_msgport,
                            0, netmsg_so_notify_abort);
                msg.nm_notifymsg = (void *)lmsg;
                lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
        }
}

/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
        struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
        struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
        struct signalsockbuf *ssb;

        /*
         * The original notify message is not destroyed until after the
         * abort request is returned, so we can check its state.
         */
        lwkt_getpooltoken(nmsg->base.nm_so);
        if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
                ssb = (nmsg->nm_etype & NM_REVENT) ?
                    &nmsg->base.nm_so->so_rcv :
                    &nmsg->base.nm_so->so_snd;
                TAILQ_REMOVE(&ssb->ssb_mlist, nmsg, nm_list);
                lwkt_relpooltoken(nmsg->base.nm_so);
                lwkt_replymsg(&nmsg->base.lmsg, EINTR);
        } else {
                lwkt_relpooltoken(nmsg->base.nm_so);
        }

        /*
         * Reply to the abort message
         */
        lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}
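
/*
 * Reply to the socket's pre-allocated async pru_rcvd message,
 * presumably called by the protocol once the notification has been
 * processed.
 */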
void
so_async_rcvd_reply(struct socket *so)
{
        /*
         * Spinlock safe, reply runs to degenerate lwkt_null_replyport()
         */
        spin_lock(&so->so_rcvd_spin);
        lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
        spin_unlock(&so->so_rcvd_spin);
}
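
/*
 * Mark the async pru_rcvd message dead and drop it, retrying until
 * the drop sticks (the message may still be in flight; each retry is
 * counted in async_rcvd_drop_race).
 */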
void
so_async_rcvd_drop(struct socket *so)
{
        lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

        /*
         * Spinlock safe, drop runs to degenerate lwkt_spin_dropmsg()
         */
        spin_lock(&so->so_rcvd_spin);
        so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
again:
        lwkt_dropmsg(lmsg);
        if ((lmsg->ms_flags & MSGF_DONE) == 0) {
                ++async_rcvd_drop_race;
                ssleep(so, &so->so_rcvd_spin, 0, "soadrop", 1);
                goto again;
        }
        spin_unlock(&so->so_rcvd_spin);
}