/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>

#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>
static int async_rcvd_drop_race = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, async_rcvd_drop_race, CTLFLAG_RW,
    &async_rcvd_drop_race, 0, "# of asynchronous pru_rcvd msg drop races");
/*
 * Abort a socket and free it, asynchronously.  Called from
 * soabort_async() only.  soabort_async() obtained a reference
 * on the socket, which we must free on reply.
 */
void
so_pru_abort_async(struct socket *so)
{
	struct netmsg_pru_abort *msg;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
	    0, so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
/*
 * Abort a socket and free it.  Called from soabort_direct() only.
 * The caller must make sure that the current CPU is the inpcb's
 * owner CPU.  soabort_direct() obtained a reference on the socket,
 * which we must free.
 */
void
so_pru_abort_direct(struct socket *so)
{
	struct netmsg_pru_abort msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	sofree(msg.base.nm_so);
}
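
/*
 * NOTE: The *_direct variants in this file share a dispatch pattern:
 * MSGF_REPLY and MSGF_DONE are cleared, MSGF_SYNC is set, and the
 * protocol handler is invoked directly instead of being sent through
 * the message port.  The handler's reply runs to netisr_adone_rport,
 * which simply marks the message MSGF_DONE; the KKASSERT verifies
 * that the handler did reply.
 */

/*
 * Accept a pending connection.  Dispatched synchronously to the
 * socket's protocol thread.
 */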
int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_accept msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_accept);
	msg.nm_nam = nam;

	return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}
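
/*
 * Illustrative sketch (not compiled): the synchronous dispatch pattern
 * used by most of the wrappers below.  A netmsg is initialized on the
 * stack with the current thread's reply port and the per-protocol
 * handler, then lwkt_domsg() sends it to the socket's protocol thread
 * and blocks until the handler replies; the reply's error code becomes
 * the return value.  The wrapper name here is hypothetical.
 */
#if 0
static int
so_pru_example_sync(struct socket *so, netisr_fn_t handler)
{
	struct netmsg_base msg;

	/* Reply comes back to the calling thread's own message port. */
	netmsg_init(&msg, so, &curthread->td_msgport, 0, handler);
	return (lwkt_domsg(so->so_port, &msg.lmsg, 0));
}
#endif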
int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_attach);
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
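
/*
 * Attach a socket by calling pru_attach directly, bypassing the
 * message port.  As with the other *_direct variants, the caller is
 * expected to be running on the socket's owner CPU.
 */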
int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}
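
/*
 * Attach a socket without waiting for the reply: pru_preattach runs
 * synchronously in the caller's context, then the pru_attach message
 * is sent asynchronously (netisr_afree_rport frees it on reply).
 * Falls back to synchronous so_pru_attach() if the message cannot be
 * allocated.
 */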
int
so_pru_attach_fast(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach *msg;
	int error;

	error = so->so_proto->pr_usrreqs->pru_preattach(so, proto, ai);
	if (error)
		return error;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Failed to allocate the message; fall back to
		 * synchronous pru_attach.
		 */
		return so_pru_attach(so, proto, NULL /* postattach */);
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_attach);
	msg->nm_proto = proto;
	msg->nm_ai = NULL; /* postattach */
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);

	return 0;
}
/*
 * NOTE: If the target port changes, the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_bind msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_bind);
	msg.nm_nam = nam;
	msg.nm_td = td;		/* used only for prison_ip() */
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_connect);
	msg.nm_nam = nam;
	msg.nm_td = td;
	msg.nm_m = NULL;
	msg.nm_sndflags = 0;
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
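
/*
 * Initiate a connection without waiting for it to complete:
 * pru_preconnect runs synchronously, then the pru_connect message
 * (with a private copy of the sockaddr appended) is sent
 * asynchronously.  Falls back to synchronous so_pru_connect() if
 * the message cannot be allocated.
 */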
int
so_pru_connect_async(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect *msg;
	int error, flags;

	KASSERT(so->so_proto->pr_usrreqs->pru_preconnect != NULL,
	    ("async pru_connect is not supported"));

	/* NOTE: sockaddr immediately follows netmsg */
	msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG,
	    M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Failed to allocate the message; fall back to
		 * synchronous pru_connect.
		 */
		return so_pru_connect(so, nam, td);
	}

	error = so->so_proto->pr_usrreqs->pru_preconnect(so, nam, td);
	if (error) {
		kfree(msg, M_LWKTMSG);
		return error;
	}

	flags = PRUC_ASYNC;
	if (td != NULL && (so->so_proto->pr_flags & PR_ACONN_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUC_HELDTD;
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_connect);
	msg->nm_nam = (struct sockaddr *)(msg + 1);
	memcpy(msg->nm_nam, nam, nam->sa_len);
	msg->nm_td = td;
	msg->nm_m = NULL;
	msg->nm_sndflags = 0;
	msg->nm_flags = flags;
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);
	return 0;
}
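
/*
 * Connect two sockets to each other (e.g. for socketpair(2)).
 * Dispatched to so1's protocol thread.
 */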
int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
	struct netmsg_pru_connect2 msg;
	int error;

	netmsg_init(&msg.base, so1, &curthread->td_msgport,
	    0, so1->so_proto->pr_usrreqs->pru_connect2);
	msg.nm_so1 = so1;
	msg.nm_so2 = so2;
	error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
	return (error);
}
/*
 * WARNING!  Synchronous call from user context.  The control function
 * may do copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp)
{
	struct netmsg_pru_control msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_data = data;
	msg.nm_ifp = ifp;
	msg.nm_td = curthread;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}
int
so_pru_detach(struct socket *so)
{
	struct netmsg_pru_detach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_detach);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_detach_direct(struct socket *so)
{
	struct netmsg_pru_detach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}
int
so_pru_disconnect(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_disconnect);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
void
so_pru_disconnect_direct(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}
int
so_pru_listen(struct socket *so, struct thread *td)
{
	struct netmsg_pru_listen msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_listen);
	msg.nm_td = td;		/* used only for prison_ip() XXX JH */
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_peeraddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_peeraddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_rcvd(struct socket *so, int flags)
{
	struct netmsg_pru_rcvd msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_rcvd);
	msg.nm_flags = flags;
	msg.nm_pru_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
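
/*
 * Asynchronous pru_rcvd, for protocols with PR_ASYNC_RCVD set: reuse
 * the netmsg preallocated in the socket (so_rcvd_msg) instead of
 * allocating one, and do not wait for the reply.  The message is sent
 * only if its previous incarnation has already completed (MSGF_DONE)
 * and the socket is not marked PRUR_DEAD.
 */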
void
so_pru_rcvd_async(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
	    ("async pru_rcvd is not supported"));

	/*
	 * WARNING!  The spinlock is a bit dodgy; use the two-stage
	 * prepare/start sendmsg so the spinlock can be dropped before
	 * the message is actually sent, avoiding a deadlock.
	 */
	spin_lock(&so->so_rcvd_spin);
	if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
		if (lmsg->ms_flags & MSGF_DONE) {
			lwkt_sendmsg_prepare(so->so_port, lmsg);
			spin_unlock(&so->so_rcvd_spin);
			if (so->so_port == netisr_curport())
				lwkt_sendmsg_start_oncpu(so->so_port, lmsg);
			else
				lwkt_sendmsg_start(so->so_port, lmsg);
		} else {
			spin_unlock(&so->so_rcvd_spin);
		}
	} else {
		spin_unlock(&so->so_rcvd_spin);
	}
}
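
/*
 * Receive out-of-band data.
 */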
int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
	struct netmsg_pru_rcvoob msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_rcvoob);
	msg.nm_m = m;
	msg.nm_flags = flags;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
/*
 * NOTE: If the target port changes, the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_send);
	msg.nm_flags = flags;
	msg.nm_m = m;
	msg.nm_addr = addr;
	msg.nm_control = control;
	msg.nm_td = td;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
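
/*
 * Synchronize with the socket's protocol thread by running a dummy
 * message through its port; since messages are processed in order,
 * all previously queued messages have completed when this returns.
 */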
void
so_pru_sync(struct socket *so)
{
	struct netmsg_base msg;

	netmsg_init(&msg, so, &curthread->td_msgport, 0,
	    netmsg_sync_handler);
	lwkt_domsg(so->so_port, &msg.lmsg, 0);
}
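
/*
 * Asynchronous pru_send, for protocols with PR_ASYNC_SEND set: the
 * netmsg embedded in the mbuf header is used, so no allocation is
 * needed for the message itself.  The sockaddr, if any, is copied and
 * later freed by the protocol (PRUS_FREEADDR); if that copy cannot be
 * allocated we fall back to synchronous so_pru_send().
 */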
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	if (addr0 != NULL) {
		addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK | M_NULLOK);
		if (addr == NULL) {
			/*
			 * Failed to allocate the address copy; fall
			 * back to synchronous pru_send.
			 */
			so_pru_send(so, flags, m, addr0, control, td);
			return;
		}
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUS_HELDTD;
	}

	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport,
	    0, so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
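
/*
 * fstat(2) support: have the protocol fill in the stat structure.
 */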
int
so_pru_sense(struct socket *so, struct stat *sb)
{
	struct netmsg_pru_sense msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_sense);
	msg.nm_stat = sb;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_shutdown(struct socket *so)
{
	struct netmsg_pru_shutdown msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_shutdown);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_sockaddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_sockaddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
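
/*
 * Protocol socket option processing.  For SOPT_SET, a protocol may
 * supply pr_ctloutmsg to build a message that can be dispatched
 * asynchronously; otherwise the request is dispatched synchronously.
 */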
int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));

	if (sopt->sopt_dir == SOPT_SET && so->so_proto->pr_ctloutmsg != NULL) {
		struct netmsg_pr_ctloutput *amsg;

		/* Fast path: asynchronous pr_ctloutput */
		amsg = so->so_proto->pr_ctloutmsg(sopt);
		if (amsg != NULL) {
			netmsg_init(&amsg->base, so, &netisr_afree_rport, 0,
			    so->so_proto->pr_ctloutput);
			/* nm_flags and nm_sopt are set up by pr_ctloutmsg */
			if (so->so_port == netisr_curport()) {
				lwkt_sendmsg_oncpu(so->so_port,
				    &amsg->base.lmsg);
			} else {
				lwkt_sendmsg(so->so_port, &amsg->base.lmsg);
			}
			return 0;
		}
		/* FALLTHROUGH */
	}

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_ctloutput);
	msg.nm_flags = 0;
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
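
/*
 * Resolve the protocol thread (port) that should process a control
 * input event; returns NULL if the protocol does not take control
 * input.
 */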
struct lwkt_port *
so_pr_ctlport(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra, int *cpuid)
{
	if (pr->pr_ctlport == NULL)
		return NULL;
	KKASSERT(pr->pr_ctlinput != NULL);

	return pr->pr_ctlport(cmd, arg, extra, cpuid);
}
/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return, otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pr_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
	struct netmsg_pr_ctlinput msg;
	lwkt_port_t port;
	int cpuid;

	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, pr->pr_ctlinput);
	msg.nm_cmd = cmd;
	msg.nm_direct = 0;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	lwkt_domsg(port, &msg.base.lmsg, 0);
}
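
/*
 * Direct variant of so_pr_ctlinput(): call pr_ctlinput synchronously
 * on the current CPU.  Events whose resolved cpuid designates some
 * other specific CPU (i.e. is neither netisr_ncpus nor mycpuid) are
 * ignored.
 */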
void
so_pr_ctlinput_direct(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra)
{
	struct netmsg_pr_ctlinput msg;
	netisr_fn_t func;
	lwkt_port_t port;
	int cpuid;

	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
	if (cpuid != netisr_ncpus && cpuid != mycpuid)
		return;

	func = pr->pr_ctlinput;
	netmsg_init(&msg.base, NULL, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_direct = 1;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}
/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct signalsockbuf *ssb;

	ssb = (msg->notify.nm_etype & NM_REVENT) ? &so->so_rcv : &so->so_snd;

	/*
	 * Reply immediately if the event has occurred, otherwise queue
	 * the request.
	 *
	 * NOTE: The socket can change if this is an accept predicate, so
	 *	 cache the pool token.
	 */
	lwkt_getpooltoken(so);
	atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
	if (msg->notify.nm_predicate(&msg->notify)) {
		if (TAILQ_EMPTY(&ssb->ssb_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
		lwkt_replymsg(&msg->base.lmsg,
		    msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_mlist, &msg->notify, nm_list);
		/*
		 * NOTE:
		 * If the predicate ever blocks, the pool token will be
		 * released, so the SSB_MEVENT set beforehand could have
		 * been cleared by the time we get here.  In case that
		 * happens, we set SSB_MEVENT again after the notify has
		 * been queued.
		 */
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
	}
}
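
/*
 * Illustrative sketch (not compiled): the general shape of a predicate
 * handed to the notify machinery above.  The predicate name and the
 * readability test are hypothetical; a real predicate sets ms_error on
 * the embedded lwkt msg and returns non-zero once the awaited event
 * has occurred, at which point netmsg_so_notify() replies immediately
 * instead of queueing the request on ssb_mlist.
 */
#if 0
static boolean_t
example_predicate(struct netmsg_so_notify *msg)
{
	struct socket *so = msg->base.nm_so;

	/* Event: data is readable or the receive side is shut down. */
	if (so->so_rcv.ssb_cc > 0 || (so->so_state & SS_CANTRCVMORE)) {
		msg->base.lmsg.ms_error = 0;
		return (TRUE);
	}
	return (FALSE);
}
#endif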
/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return, so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu, which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort msg;

	if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		const struct netmsg_base *nmsg =
		    (const struct netmsg_base *)lmsg;

		netmsg_init(&msg.base, nmsg->nm_so, &curthread->td_msgport,
		    0, netmsg_so_notify_abort);
		msg.nm_notifymsg = (void *)lmsg;
		lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
	}
}
/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
	struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
	struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
	struct signalsockbuf *ssb;

	/*
	 * The original notify message is not destroyed until after the
	 * abort request is returned, so we can check its state.
	 */
	lwkt_getpooltoken(nmsg->base.nm_so);
	if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		ssb = (nmsg->nm_etype & NM_REVENT) ?
		    &nmsg->base.nm_so->so_rcv :
		    &nmsg->base.nm_so->so_snd;
		TAILQ_REMOVE(&ssb->ssb_mlist, nmsg, nm_list);
		lwkt_relpooltoken(nmsg->base.nm_so);
		lwkt_replymsg(&nmsg->base.lmsg, EINTR);
	} else {
		lwkt_relpooltoken(nmsg->base.nm_so);
	}

	/*
	 * Reply to the abort message.
	 */
	lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}
void
so_async_rcvd_reply(struct socket *so)
{
	/*
	 * Spinlock safe; the reply runs to the degenerate
	 * lwkt_null_replyport().
	 */
	spin_lock(&so->so_rcvd_spin);
	lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
	spin_unlock(&so->so_rcvd_spin);
}
void
so_async_rcvd_drop(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	/*
	 * Spinlock safe; the drop runs to the degenerate
	 * lwkt_spin_dropmsg().
	 */
	spin_lock(&so->so_rcvd_spin);
	so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
again:
	lwkt_dropmsg(lmsg);
	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
		++async_rcvd_drop_race;
		ssleep(so, &so->so_rcvd_spin, 0, "soadrop", 1);
		goto again;
	}
	spin_unlock(&so->so_rcvd_spin);
}