/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>

#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>
static int async_rcvd_drop_race = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, async_rcvd_drop_race, CTLFLAG_RW,
    &async_rcvd_drop_race, 0, "# of asynchronous pru_rcvd msg drop races");
/*
 * Abort a socket and free it, asynchronously.  Called from
 * soabort_async() only.  soabort_async() got a ref on the
 * socket which we must free on reply.
 */
void
so_pru_abort_async(struct socket *so)
{
	struct netmsg_pru_abort *msg;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
	    0, so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
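
/*
 * As its name suggests, the netisr_afree_free_so_rport reply port is
 * responsible for both freeing the dynamically allocated message and
 * dropping the socket reference acquired by soabort_async() when the
 * protocol thread replies, which is why no explicit sofree() appears
 * here.  Compare with so_pru_abort_direct() below, which performs the
 * sofree() itself.
 */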
/*
 * Abort a socket and free it.  Called from soabort_direct() only.
 * The caller must make sure that the current CPU is the inpcb's owner CPU.
 * soabort_direct() got a ref on the socket which we must free.
 */
void
so_pru_abort_direct(struct socket *so)
{
	struct netmsg_pru_abort msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	sofree(msg.base.nm_so);
}
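
/*
 * The *_direct() variants above and below all follow the same pattern:
 * the message is built on the stack, MSGF_REPLY and MSGF_DONE are
 * cleared and MSGF_SYNC is set so the message looks like an in-transit
 * synchronous request, and the pru function is called directly instead
 * of being dispatched through the protocol thread's msgport.  The
 * netisr_adone_rport reply port simply marks the message done, which
 * the KKASSERT() verifies before the result is picked up from
 * ms_error.  This only works when the caller already runs in the
 * context that owns the socket/pcb.
 */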
int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_accept msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_accept);
	msg.nm_nam = nam;

	return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}
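
/*
 * Most of the remaining so_pru_*() wrappers below follow the same
 * synchronous pattern as so_pru_accept() above: a netmsg is built on
 * the stack with the caller's td_msgport as the reply port, the
 * relevant arguments are copied into the message, and lwkt_domsg()
 * blocks until the protocol thread owning so->so_port has processed
 * the request and replied with an error code.
 */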
int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_attach);
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}
int
so_pru_attach_fast(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach *msg;
	int error;

	error = so->so_proto->pr_usrreqs->pru_preattach(so, proto, ai);
	if (error)
		return error;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Failed to allocate the message; fall back to
		 * synchronous pru_attach.
		 */
		return so_pru_attach(so, proto, NULL /* postattach */);
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_attach);
	msg->nm_proto = proto;
	msg->nm_ai = NULL; /* postattach */
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);

	return 0;
}
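
/*
 * so_pru_attach_fast() runs the protocol's pru_preattach() directly in
 * the caller's context and then hands the actual pru_attach off to the
 * protocol thread as a fire-and-forget message; the netisr_afree_rport
 * reply port frees the message automatically on reply.  If kmalloc()
 * returns NULL (possible because of M_NULLOK), the code simply falls
 * back to the fully synchronous so_pru_attach() path above.
 */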
/*
 * NOTE: If the target port changes, the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_bind msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_bind);
	msg.nm_nam = nam;
	msg.nm_td = td;		/* used only for prison_ip() */
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_connect);
	msg.nm_nam = nam;
	msg.nm_td = td;
	msg.nm_m = NULL;
	msg.nm_sndflags = 0;
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_connect_async(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect *msg;
	int error, flags;

	KASSERT(so->so_proto->pr_usrreqs->pru_preconnect != NULL,
	    ("async pru_connect is not supported"));

	/* NOTE: sockaddr immediately follows netmsg */
	msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG,
	    M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Failed to allocate the message; fall back to
		 * synchronous pru_connect.
		 */
		return so_pru_connect(so, nam, td);
	}

	error = so->so_proto->pr_usrreqs->pru_preconnect(so, nam, td);
	if (error) {
		kfree(msg, M_LWKTMSG);
		return error;
	}

	flags = PRUC_ASYNC;
	if (td != NULL && (so->so_proto->pr_flags & PR_ACONN_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUC_HELDTD;
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_connect);
	msg->nm_nam = (struct sockaddr *)(msg + 1);
	memcpy(msg->nm_nam, nam, nam->sa_len);
	msg->nm_td = td;
	msg->nm_m = NULL;
	msg->nm_sndflags = 0;
	msg->nm_flags = flags;
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);
	return 0;
}
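
/*
 * The asynchronous connect allocates the netmsg and the sockaddr copy
 * in a single kmalloc(); (msg + 1) points at the byte immediately
 * after the message, which is where the caller's sockaddr is copied:
 *
 *	+-------------------------------+---------------------+
 *	| struct netmsg_pru_connect	| sockaddr (sa_len)   |
 *	+-------------------------------+---------------------+
 *	^ msg				^ msg->nm_nam == (msg + 1)
 *
 * The copy is required because the message outlives the caller's
 * stack frame once it has been queued to the protocol thread.
 */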
int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
	struct netmsg_pru_connect2 msg;
	int error;

	netmsg_init(&msg.base, so1, &curthread->td_msgport,
	    0, so1->so_proto->pr_usrreqs->pru_connect2);
	msg.nm_so1 = so1;
	msg.nm_so2 = so2;
	error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
	return (error);
}
/*
 * WARNING!  Synchronous call from user context.  The control function may
 *	     do copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp)
{
	struct netmsg_pru_control msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_data = data;
	msg.nm_ifp = ifp;
	msg.nm_td = curthread;	/* pru_control may do copyin/copyout */
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}
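
/*
 * Unlike the other *_direct() wrappers, so_pru_control_direct() is
 * called synchronously from user (syscall) context.  Keeping the call
 * in the caller's thread leaves curthread valid for any copyin() /
 * copyout() the protocol's pru_control function needs to perform on
 * the ioctl data, which is why nm_td is set to curthread above.
 */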
int
so_pru_detach(struct socket *so)
{
	struct netmsg_pru_detach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_detach);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_detach_direct(struct socket *so)
{
	struct netmsg_pru_detach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}
int
so_pru_disconnect(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_disconnect);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
void
so_pru_disconnect_direct(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}
int
so_pru_listen(struct socket *so, struct thread *td)
{
	struct netmsg_pru_listen msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_listen);
	msg.nm_td = td;		/* used only for prison_ip() XXX JH */
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_peeraddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_peeraddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_rcvd(struct socket *so, int flags)
{
	struct netmsg_pru_rcvd msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_rcvd);
	msg.nm_flags = flags;
	msg.nm_pru_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
void
so_pru_rcvd_async(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
	    ("async pru_rcvd is not supported"));

	/*
	 * WARNING!  The spinlock is a bit dodgy; use the hacked up sendmsg
	 *	     to avoid deadlocking.
	 */
	spin_lock(&so->so_rcvd_spin);
	if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
		if (lmsg->ms_flags & MSGF_DONE) {
			lwkt_sendmsg_prepare(so->so_port, lmsg);
			spin_unlock(&so->so_rcvd_spin);
			if (so->so_port == netisr_curport())
				lwkt_sendmsg_start_oncpu(so->so_port, lmsg);
			else
				lwkt_sendmsg_start(so->so_port, lmsg);
		} else {
			spin_unlock(&so->so_rcvd_spin);
		}
	} else {
		spin_unlock(&so->so_rcvd_spin);
	}
}
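
/*
 * The "hacked up sendmsg" warned about above splits lwkt_sendmsg()
 * into lwkt_sendmsg_prepare() and lwkt_sendmsg_start(): the message is
 * marked in-transit while so_rcvd_spin is still held, so it cannot
 * race a concurrent so_async_rcvd_reply() or so_async_rcvd_drop(), but
 * the actual queueing to the protocol thread is only started after the
 * spinlock has been released, avoiding deadlocking against the target
 * msgport while the spinlock is held.  The pre-initialized
 * so->so_rcvd_msg is reused for every asynchronous pru_rcvd, which is
 * why the message is only (re)sent when MSGF_DONE indicates that the
 * previous send has completed.
 */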
int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
	struct netmsg_pru_rcvoob msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_rcvoob);
	msg.nm_m = m;
	msg.nm_flags = flags;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
/*
 * NOTE: If the target port changes, the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_send);
	msg.nm_flags = flags;
	msg.nm_m = m;
	msg.nm_addr = addr;
	msg.nm_control = control;
	msg.nm_td = td;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
void
so_pru_sync(struct socket *so)
{
	struct netmsg_base msg;

	netmsg_init(&msg, so, &curthread->td_msgport, 0,
	    netmsg_sync_handler);
	lwkt_domsg(so->so_port, &msg.lmsg, 0);
}
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	if (addr0 != NULL) {
		addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK | M_NULLOK);
		if (addr == NULL) {
			/*
			 * Failed to allocate the address copy; fall back
			 * to synchronous pru_send.
			 */
			so_pru_send(so, flags, m, addr0, control, td);
			return;
		}
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUS_HELDTD;
	}

	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport,
	    0, so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
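
/*
 * The asynchronous send embeds the netmsg in the mbuf header itself
 * (m_hdr.mh_sndmsg), so no separate allocation is needed for the
 * message; only the destination address, if any, is copied.
 * PRUS_NOREPLY tells the protocol side not to reply, and the
 * netisr_apanic_rport reply port enforces that by panicking if a reply
 * is ever attempted.  PRUS_FREEADDR and PRUS_HELDTD tell the protocol
 * side to free the address copy and drop the held thread reference
 * once the send has been processed.
 */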
int
so_pru_sense(struct socket *so, struct stat *sb)
{
	struct netmsg_pru_sense msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_sense);
	msg.nm_stat = sb;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_shutdown(struct socket *so)
{
	struct netmsg_pru_shutdown msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_shutdown);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_sockaddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_sockaddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));

	if (sopt->sopt_dir == SOPT_SET && so->so_proto->pr_ctloutmsg != NULL) {
		struct netmsg_pr_ctloutput *amsg;

		/* Fast path: asynchronous pr_ctloutput */
		amsg = so->so_proto->pr_ctloutmsg(sopt);
		if (amsg != NULL) {
			netmsg_init(&amsg->base, so, &netisr_afree_rport, 0,
			    so->so_proto->pr_ctloutput);
			/* nm_flags and nm_sopt are set up by pr_ctloutmsg */
			if (so->so_port == netisr_curport()) {
				lwkt_sendmsg_oncpu(so->so_port,
				    &amsg->base.lmsg);
			} else {
				lwkt_sendmsg(so->so_port, &amsg->base.lmsg);
			}
			return 0;
		}
		/* FALLTHROUGH */
	}

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_ctloutput);
	msg.nm_flags = 0;
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
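
/*
 * For SOPT_SET requests a protocol may supply pr_ctloutmsg(), which
 * packages the sockopt into a dynamically allocated message so the
 * option can be applied asynchronously in the protocol thread, with
 * the message auto-freed on reply via netisr_afree_rport.  Anything
 * the fast path cannot handle, including all SOPT_GET requests, falls
 * through to the synchronous lwkt_domsg() path at the end of the
 * function above.
 */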
lwkt_port_t
so_pr_ctlport(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra, int *cpuid)
{
	if (pr->pr_ctlport == NULL)
		return NULL;
	KKASSERT(pr->pr_ctlinput != NULL);

	return pr->pr_ctlport(cmd, arg, extra, cpuid);
}
/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return, otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pr_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
	struct netmsg_pr_ctlinput msg;
	lwkt_port_t port;
	int cpuid;

	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, pr->pr_ctlinput);
	msg.nm_cmd = cmd;
	msg.nm_direct = 0;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	lwkt_domsg(port, &msg.base.lmsg, 0);
}
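
/*
 * Even though no result from pr_ctlinput is consumed here, the message
 * is dispatched with the blocking lwkt_domsg() rather than
 * lwkt_sendmsg(): as the comment above notes, arg and extra may point
 * at temporary data that must remain valid until pr_ctlinput has
 * finished with it.
 */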
void
so_pr_ctlinput_direct(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra)
{
	struct netmsg_pr_ctlinput msg;
	netisr_fn_t func;
	lwkt_port_t port;
	int cpuid;

	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
	if (cpuid != netisr_ncpus && cpuid != mycpuid)
		return;

	func = pr->pr_ctlinput;
	netmsg_init(&msg.base, NULL, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_direct = 1;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}
/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct signalsockbuf *ssb;

	ssb = (msg->notify.nm_etype & NM_REVENT) ? &so->so_rcv : &so->so_snd;

	/*
	 * Reply immediately if the event has occurred, otherwise queue the
	 * request.
	 *
	 * NOTE: The socket can change if this is an accept predicate, so
	 *	 cache the token.
	 */
	lwkt_getpooltoken(so);
	atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
	if (msg->notify.nm_predicate(&msg->notify)) {
		if (TAILQ_EMPTY(&ssb->ssb_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
		lwkt_replymsg(&msg->base.lmsg, msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_mlist, &msg->notify, nm_list);

		/*
		 * If the predicate ever blocks, the pool token will be
		 * released, so the SSB_MEVENT set beforehand could have
		 * been cleared by the time we reach here.  In case that
		 * happens, set SSB_MEVENT again after the notify has
		 * been queued.
		 */
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
	}
}
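
/*
 * An illustrative (hypothetical) predicate setup, to show how the
 * notify machinery above is meant to be driven.  A caller builds a
 * struct netmsg_so_notify with an event type (NM_REVENT or NM_SEVENT)
 * and a predicate callback, then sends it to the socket's port:
 *
 *	struct netmsg_so_notify nm;			// hypothetical caller
 *
 *	netmsg_init(&nm.base, so, &curthread->td_msgport,
 *	    0, netmsg_so_notify);
 *	nm.nm_predicate = my_readable_predicate;	// hypothetical
 *	nm.nm_etype = NM_REVENT;
 *	error = lwkt_domsg(so->so_port, &nm.base.lmsg, 0);
 *
 * If the predicate is not yet satisfied the message stays queued on
 * ssb_mlist and is replied to later, or aborted via
 * netmsg_so_notify_doabort() below.
 */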
/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return, so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu, which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort msg;

	if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		const struct netmsg_base *nmsg =
		    (const struct netmsg_base *)lmsg;

		netmsg_init(&msg.base, nmsg->nm_so, &curthread->td_msgport,
		    0, netmsg_so_notify_abort);
		msg.nm_notifymsg = (void *)lmsg;
		lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
	}
}
/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * executed).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
	struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
	struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
	struct signalsockbuf *ssb;

	/*
	 * The original notify message is not destroyed until after the
	 * abort request is returned, so we can check its state.
	 */
	lwkt_getpooltoken(nmsg->base.nm_so);
	if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		ssb = (nmsg->nm_etype & NM_REVENT) ?
		    &nmsg->base.nm_so->so_rcv :
		    &nmsg->base.nm_so->so_snd;
		TAILQ_REMOVE(&ssb->ssb_mlist, nmsg, nm_list);
		lwkt_relpooltoken(nmsg->base.nm_so);
		lwkt_replymsg(&nmsg->base.lmsg, EINTR);
	} else {
		lwkt_relpooltoken(nmsg->base.nm_so);
	}

	/*
	 * Reply to the abort message.
	 */
	lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}
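
/*
 * The two-stage abort above works because both the original notify
 * message and the abort message are processed sequentially by the same
 * protocol thread: by the time netmsg_so_notify_abort() runs, the
 * original request has either already been replied to (MSGF_REPLY or
 * MSGF_DONE set, in which case the abort is a no-op) or it is still
 * sitting on ssb_mlist, where it can safely be removed and replied to
 * with EINTR.
 */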
void
so_async_rcvd_reply(struct socket *so)
{
	/*
	 * Spinlock safe, reply runs to degenerate lwkt_null_replyport()
	 */
	spin_lock(&so->so_rcvd_spin);
	lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
	spin_unlock(&so->so_rcvd_spin);
}
void
so_async_rcvd_drop(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	/*
	 * Spinlock safe, drop runs to degenerate lwkt_spin_dropmsg()
	 */
	spin_lock(&so->so_rcvd_spin);
	so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
again:
	lwkt_dropmsg(lmsg);
	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
		++async_rcvd_drop_race;
		ssleep(so, &so->so_rcvd_spin, 0, "soadrop", 1);
		goto again;
	}
	spin_unlock(&so->so_rcvd_spin);
}
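
/*
 * so_async_rcvd_drop() marks the pre-allocated receive-notification
 * message dead (PRUR_DEAD) so so_pru_rcvd_async() will not resend it,
 * then waits for any message still in flight to complete.  The rare
 * case where the drop races an in-flight message is counted in the
 * kern.ipc.async_rcvd_drop_race sysctl defined at the top of the file.
 */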