sys/kern/uipc_msg.c
/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>

#include <net/netmsg2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>

static int async_rcvd_drop_race = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, async_rcvd_drop_race, CTLFLAG_RW,
    &async_rcvd_drop_race, 0, "# of asynchronized pru_rcvd msg drop races");
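
/*
 * The wrappers below generally follow one dispatch pattern: initialize a
 * netmsg (on the stack for synchronous calls; heap- or mbuf-embedded for
 * the asynchronous ones) with the protocol's pru_* handler as its
 * function, then hand it to the socket's protocol thread via so->so_port.
 * lwkt_domsg() blocks until that thread replies, so the plain wrappers
 * behave like ordinary function calls even though the work runs in
 * another thread.
 */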

/*
 * Abort a socket and free it.  Called from soabort() only.  soabort()
 * got a ref on the socket which we must free on reply.
 */
void
so_pru_abort(struct socket *so)
{
        struct netmsg_pru_abort msg;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_abort);
        lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        sofree(msg.base.nm_so);
}

/*
 * Abort a socket and free it, asynchronously.  Called from
 * soabort_async() only.  soabort_async() got a ref on the
 * socket which we must free on reply.
 */
void
so_pru_abort_async(struct socket *so)
{
        struct netmsg_pru_abort *msg;

        msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
        netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
            0, so->so_proto->pr_usrreqs->pru_abort);
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

/*
 * Abort a socket and free it.  Called from soabort_oncpu() only.
 * Caller must make sure that the current CPU is inpcb's owner CPU.
 */
void
so_pru_abort_direct(struct socket *so)
{
        struct netmsg_pru_abort msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        sofree(msg.base.nm_so);
}
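
/*
 * The *_direct variants bypass the message port entirely: clearing
 * MSGF_REPLY and MSGF_DONE and setting MSGF_SYNC makes the stack netmsg
 * look like a freshly sent synchronous message, the pru_* handler is then
 * invoked inline, and its reply (routed to netisr_adone_rport) merely
 * marks the message done, which the KKASSERT verifies.  This only works
 * when the caller is already running on the cpu that owns the socket, as
 * noted above for so_pru_abort_direct().
 */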

int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_accept msg;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_accept);
        msg.nm_nam = nam;

        return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}

int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
        struct netmsg_pru_attach msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_attach);
        msg.nm_proto = proto;
        msg.nm_ai = ai;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
        struct netmsg_pru_attach msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        msg.nm_proto = proto;
        msg.nm_ai = ai;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        return (msg.base.lmsg.ms_error);
}

/*
 * NOTE: If the target port changes, the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct netmsg_pru_bind msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_bind);
        msg.nm_nam = nam;
        msg.nm_td = td;         /* used only for prison_ip() */
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct netmsg_pru_connect msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_connect);
        msg.nm_nam = nam;
        msg.nm_td = td;
        msg.nm_m = NULL;
        msg.nm_sndflags = 0;
        msg.nm_flags = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_connect_async(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct netmsg_pru_connect *msg;
        int error, flags;

        KASSERT(so->so_proto->pr_usrreqs->pru_preconnect != NULL,
            ("async pru_connect is not supported"));

        /* NOTE: The sockaddr immediately follows the netmsg */
        msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG, M_NOWAIT);
        if (msg == NULL) {
                /*
                 * Failed to allocate the message without waiting;
                 * fall back to the synchronous pru_connect.
                 */
                return so_pru_connect(so, nam, td);
        }

        error = so->so_proto->pr_usrreqs->pru_preconnect(so, nam, td);
        if (error) {
                kfree(msg, M_LWKTMSG);
                return error;
        }

        flags = PRUC_ASYNC;
        if (td != NULL && (so->so_proto->pr_flags & PR_ACONN_HOLDTD)) {
                lwkt_hold(td);
                flags |= PRUC_HELDTD;
        }

        netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
            so->so_proto->pr_usrreqs->pru_connect);
        msg->nm_nam = (struct sockaddr *)(msg + 1);
        memcpy(msg->nm_nam, nam, nam->sa_len);
        msg->nm_td = td;
        msg->nm_m = NULL;
        msg->nm_sndflags = 0;
        msg->nm_flags = flags;
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);
        return 0;
}
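
/*
 * Note that the asynchronous variants never wait for the reply.  The
 * message is initialized with an auto-free reply port (netisr_afree_rport
 * here; netisr_afree_free_so_rport in so_pru_abort_async(), which also
 * drops the socket reference), so the eventual reply simply frees the
 * message instead of waking a sender.
 */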

int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
        struct netmsg_pru_connect2 msg;
        int error;

        netmsg_init(&msg.base, so1, &curthread->td_msgport,
            0, so1->so_proto->pr_usrreqs->pru_connect2);
        msg.nm_so1 = so1;
        msg.nm_so2 = so2;
        error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
        return (error);
}

/*
 * WARNING!  Synchronous call from user context.  Control function may do
 * copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp)
{
        struct netmsg_pru_control msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        msg.nm_cmd = cmd;
        msg.nm_data = data;
        msg.nm_ifp = ifp;
        msg.nm_td = curthread;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        return (msg.base.lmsg.ms_error);
}

int
so_pru_detach(struct socket *so)
{
        struct netmsg_pru_detach msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_detach);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_detach_direct(struct socket *so)
{
        struct netmsg_pru_detach msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
        return (msg.base.lmsg.ms_error);
}

int
so_pru_disconnect(struct socket *so)
{
        struct netmsg_pru_disconnect msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_disconnect);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

void
so_pru_disconnect_direct(struct socket *so)
{
        struct netmsg_pru_disconnect msg;
        netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

        netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
        msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
        msg.base.lmsg.ms_flags |= MSGF_SYNC;
        func((netmsg_t)&msg);
        KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_listen(struct socket *so, struct thread *td)
{
        struct netmsg_pru_listen msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_listen);
        msg.nm_td = td;         /* used only for prison_ip() XXX JH */
        msg.nm_flags = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_peeraddr msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_peeraddr);
        msg.nm_nam = nam;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_rcvd(struct socket *so, int flags)
{
        struct netmsg_pru_rcvd msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_rcvd);
        msg.nm_flags = flags;
        msg.nm_pru_flags = 0;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

void
so_pru_rcvd_async(struct socket *so)
{
        lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

        KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
            ("async pru_rcvd is not supported"));

        /*
         * WARNING!  The spinlock is a bit dodgy; use the hacked-up
         * sendmsg to avoid deadlocking.
         */
        spin_lock(&so->so_rcvd_spin);
        if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
                if (lmsg->ms_flags & MSGF_DONE) {
                        lwkt_sendmsg_prepare(so->so_port, lmsg);
                        spin_unlock(&so->so_rcvd_spin);
                        lwkt_sendmsg_start(so->so_port, lmsg);
                } else {
                        spin_unlock(&so->so_rcvd_spin);
                }
        } else {
                spin_unlock(&so->so_rcvd_spin);
        }
}
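
/*
 * The prepare/start split above keeps so_rcvd_spin from being held across
 * the actual port send: lwkt_sendmsg_prepare() claims the single,
 * preallocated so_rcvd_msg while still under the spinlock, the spinlock
 * is dropped, and only then does lwkt_sendmsg_start() queue the message
 * to the target port.  This is the "hacked-up sendmsg" the warning above
 * refers to.
 */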

int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
        struct netmsg_pru_rcvoob msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_rcvoob);
        msg.nm_m = m;
        msg.nm_flags = flags;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

/*
 * NOTE: If the target port changes, the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
        struct netmsg_pru_send msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_send);
        msg.nm_flags = flags;
        msg.nm_m = m;
        msg.nm_addr = addr;
        msg.nm_control = control;
        msg.nm_td = td;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

void
so_pru_sync(struct socket *so)
{
        struct netmsg_base msg;

        netmsg_init(&msg, so, &curthread->td_msgport, 0,
            netmsg_sync_handler);
        lwkt_domsg(so->so_port, &msg.lmsg, 0);
}

void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
        struct netmsg_pru_send *msg;
        struct sockaddr *addr = NULL;

        KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
            ("async pru_send is not supported"));

        if (addr0 != NULL) {
                addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK | M_NULLOK);
                if (addr == NULL) {
                        /*
                         * Failed to allocate the address; fall back to
                         * the synchronous pru_send.
                         */
                        so_pru_send(so, flags, m, addr0, control, td);
                        return;
                }
                memcpy(addr, addr0, addr0->sa_len);
                flags |= PRUS_FREEADDR;
        }
        flags |= PRUS_NOREPLY;

        if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
                lwkt_hold(td);
                flags |= PRUS_HELDTD;
        }

        msg = &m->m_hdr.mh_sndmsg;
        netmsg_init(&msg->base, so, &netisr_apanic_rport,
            0, so->so_proto->pr_usrreqs->pru_send);
        msg->nm_flags = flags;
        msg->nm_m = m;
        msg->nm_addr = addr;
        msg->nm_control = control;
        msg->nm_td = td;
        lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
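
/*
 * so_pru_send_async() needs no allocation for the message itself; it
 * reuses the netmsg embedded in the mbuf header (m->m_hdr.mh_sndmsg).
 * PRUS_NOREPLY tells the handler not to reply at all, and the
 * netisr_apanic_rport reply port backs that up by panicking if a reply
 * is ever attempted.
 */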

int
so_pru_sense(struct socket *so, struct stat *sb)
{
        struct netmsg_pru_sense msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_sense);
        msg.nm_stat = sb;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_shutdown(struct socket *so)
{
        struct netmsg_pru_shutdown msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_shutdown);
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
        struct netmsg_pru_sockaddr msg;
        int error;

        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_usrreqs->pru_sockaddr);
        msg.nm_nam = nam;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
        struct netmsg_pr_ctloutput msg;
        int error;

        KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
        netmsg_init(&msg.base, so, &curthread->td_msgport,
            0, so->so_proto->pr_ctloutput);
        msg.nm_sopt = sopt;
        error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
        return (error);
}

/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol's pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return; otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pru_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
        struct netmsg_pru_ctlinput msg;
        lwkt_port_t port;

        if (pr->pr_ctlport == NULL)
                return;
        KKASSERT(pr->pr_ctlinput != NULL);
        port = pr->pr_ctlport(cmd, arg, extra);
        if (port == NULL)
                return;
        netmsg_init(&msg.base, NULL, &curthread->td_msgport,
            0, pr->pr_ctlinput);
        msg.nm_cmd = cmd;
        msg.nm_arg = arg;
        msg.nm_extra = extra;
        lwkt_domsg(port, &msg.base.lmsg, 0);
}
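
/*
 * A sketch of the intended use (hypothetical caller; the names below are
 * illustrative, not taken from this file): an ICMP input path that has
 * decoded a host-unreachable error for a TCP packet might dispatch it as
 *
 *      so_pru_ctlinput(&tcpsw, PRC_UNREACH_HOST,
 *          (struct sockaddr *)&icmpsrc, extra);
 *
 * pr_ctlport then picks the netisr thread owning the affected pcb, and
 * pr_ctlinput runs there synchronously before the temporary arg/extra
 * data goes away.
 */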

/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
        struct lwkt_token *tok;
        struct signalsockbuf *ssb;

        ssb = (msg->notify.nm_etype & NM_REVENT) ?
            &msg->base.nm_so->so_rcv :
            &msg->base.nm_so->so_snd;

        /*
         * Reply immediately if the event has occurred, otherwise queue
         * the request.
         *
         * NOTE: The socket can change if this is an accept predicate, so
         *       cache the token.
         */
        tok = lwkt_token_pool_lookup(msg->base.nm_so);
        lwkt_gettoken(tok);
        atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
        if (msg->notify.nm_predicate(&msg->notify)) {
                if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
                        atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
                lwkt_reltoken(tok);
                lwkt_replymsg(&msg->base.lmsg,
                    msg->base.lmsg.ms_error);
        } else {
                TAILQ_INSERT_TAIL(&ssb->ssb_kq.ki_mlist, &msg->notify, nm_list);
                /*
                 * NOTE:
                 * If the predicate ever blocks, 'tok' will be released,
                 * so the SSB_MEVENT set beforehand could have been
                 * cleared by the time we get here.  In case that happens,
                 * we set SSB_MEVENT again, after the notify has been
                 * queued.
                 */
                atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
                lwkt_reltoken(tok);
        }
}

/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
        struct netmsg_so_notify_abort msg;

        if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
                const struct netmsg_base *nmsg =
                    (const struct netmsg_base *)lmsg;

                netmsg_init(&msg.base, nmsg->nm_so, &curthread->td_msgport,
                    0, netmsg_so_notify_abort);
                msg.nm_notifymsg = (void *)lmsg;
                lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
        }
}

/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
        struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
        struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
        struct signalsockbuf *ssb;

        /*
         * The original notify message is not destroyed until after the
         * abort request is returned, so we can check its state.
         */
        lwkt_getpooltoken(nmsg->base.nm_so);
        if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
                ssb = (nmsg->nm_etype & NM_REVENT) ?
                    &nmsg->base.nm_so->so_rcv :
                    &nmsg->base.nm_so->so_snd;
                TAILQ_REMOVE(&ssb->ssb_kq.ki_mlist, nmsg, nm_list);
                lwkt_relpooltoken(nmsg->base.nm_so);
                lwkt_replymsg(&nmsg->base.lmsg, EINTR);
        } else {
                lwkt_relpooltoken(nmsg->base.nm_so);
        }

        /*
         * Reply to the abort message.
         */
        lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}

void
so_async_rcvd_reply(struct socket *so)
{
        /*
         * Spinlock safe, reply runs to degenerate lwkt_null_replyport()
         */
        spin_lock(&so->so_rcvd_spin);
        lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
        spin_unlock(&so->so_rcvd_spin);
}

void
so_async_rcvd_drop(struct socket *so)
{
        lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

        /*
         * Spinlock safe, drop runs to degenerate lwkt_spin_dropmsg()
         */
        spin_lock(&so->so_rcvd_spin);
        so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
again:
        lwkt_dropmsg(lmsg);
        if ((lmsg->ms_flags & MSGF_DONE) == 0) {
                ++async_rcvd_drop_race;
                ssleep(so, &so->so_rcvd_spin, 0, "soadrop", 1);
                goto again;
        }
        spin_unlock(&so->so_rcvd_spin);
}
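
/*
 * The retry loop above is needed because lwkt_dropmsg() can fail to
 * dequeue the message while the target thread happens to be processing
 * it.  Each such race bumps async_rcvd_drop_race (visible through the
 * kern.ipc.async_rcvd_drop_race sysctl defined at the top of this file),
 * sleeps briefly on the spinlock, and tries again until the message
 * reaches the MSGF_DONE state.
 */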