1 /* $FreeBSD: src/sys/netinet6/frag6.c,v 1.2.2.6 2002/04/28 05:40:26 suz Exp $ */
2 /* $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $ */
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
37 #include <sys/domain.h>
38 #include <sys/protosw.h>
39 #include <sys/socket.h>
40 #include <sys/errno.h>
42 #include <sys/kernel.h>
43 #include <sys/syslog.h>
44 #include <sys/thread2.h>
47 #include <net/route.h>
48 #include <net/netisr2.h>
49 #include <net/netmsg2.h>
51 #include <netinet/in.h>
52 #include <netinet/in_var.h>
53 #include <netinet/ip6.h>
54 #include <netinet6/ip6_var.h>
55 #include <netinet/icmp6.h>
57 #include <net/net_osdep.h>
59 #define FRAG6_SLOWTIMO (hz / PR_SLOWHZ)
62 * Define it to get a correct behavior on per-interface statistics.
63 * You will need to perform an extra routing table lookup, per fragment,
64 * to do it. This may, or may not be, a performance hit.
66 #define IN6_IFSTAT_STRICT
68 static void frag6_enq (struct ip6asfrag
*, struct ip6asfrag
*);
69 static void frag6_deq (struct ip6asfrag
*);
70 static void frag6_insque (struct ip6q
*, struct ip6q
*);
71 static void frag6_remque (struct ip6q
*);
72 static void frag6_freef (struct ip6q
*);
73 static void frag6_slowtimo_dispatch (netmsg_t
);
74 static void frag6_slowtimo (void *);
75 static void frag6_drain_dispatch (netmsg_t
);
77 /* XXX we eventually need splreass6, or some real semaphore */
78 int frag6_doing_reass
;
79 u_int frag6_nfragpackets
;
81 struct ip6q ip6q
; /* ip6 reassemble queue */
84 MALLOC_DEFINE(M_FTABLE
, "fragment", "fragment reassembly header");
86 static struct callout frag6_slowtimo_ch
;
87 static struct netmsg_base frag6_slowtimo_nmsg
;
88 static struct netmsg_base frag6_drain_nmsg
;
89 static volatile int frag6_draining
;
92 * Initialise reassembly queue and fragment identifier.
99 ip6_maxfragpackets
= nmbclusters
/ 4;
100 ip6_maxfrags
= nmbclusters
/ 4;
103 * in many cases, random() here does NOT return random number
104 * as initialization during bootstrap time occur in fixed order.
107 ip6_id
= krandom() ^ tv
.tv_usec
;
108 ip6q
.ip6q_next
= ip6q
.ip6q_prev
= &ip6q
;
110 netmsg_init(&frag6_drain_nmsg
, NULL
, &netisr_adone_rport
,
111 MSGF_PRIORITY
, frag6_drain_dispatch
);
113 callout_init_mp(&frag6_slowtimo_ch
);
114 netmsg_init(&frag6_slowtimo_nmsg
, NULL
, &netisr_adone_rport
,
115 MSGF_PRIORITY
, frag6_slowtimo_dispatch
);
117 callout_reset_bycpu(&frag6_slowtimo_ch
, FRAG6_SLOWTIMO
,
118 frag6_slowtimo
, NULL
, 0);
122 * In RFC2460, fragment and reassembly rule do not agree with each other,
123 * in terms of next header field handling in fragment header.
124 * While the sender will use the same value for all of the fragmented packets,
125 * receiver is suggested not to check the consistency.
127 * fragment rule (p20):
128 * (2) A Fragment header containing:
129 * The Next Header value that identifies the first header of
130 * the Fragmentable Part of the original packet.
131 * -> next header field is same for all fragments
133 * reassembly rule (p21):
134 * The Next Header field of the last header of the Unfragmentable
135 * Part is obtained from the Next Header field of the first
136 * fragment's Fragment header.
137 * -> should grab it from the first fragment only
 * The following note also contradicts the fragment rule - no one is going to
 * send different fragments with different next header fields.
142 * additional note (p22):
143 * The Next Header values in the Fragment headers of different
144 * fragments of the same original packet may differ. Only the value
145 * from the Offset zero fragment packet is used for reassembly.
146 * -> should grab it from the first fragment only
148 * There is no explicit reason given in the RFC. Historical reason maybe?
154 frag6_input(struct mbuf
**mp
, int *offp
, int proto
)
156 struct mbuf
*m
= *mp
, *t
;
158 struct ip6_frag
*ip6f
;
160 struct ip6asfrag
*af6
, *ip6af
, *af6dwn
;
161 int offset
= *offp
, nxt
, i
, next
;
163 int fragoff
, frgpartlen
; /* must be larger than u_int16_t */
164 struct ifnet
*dstifp
;
165 #ifdef IN6_IFSTAT_STRICT
166 static struct route_in6 ro
;
167 struct sockaddr_in6
*dst
;
170 ip6
= mtod(m
, struct ip6_hdr
*);
171 #ifndef PULLDOWN_TEST
172 IP6_EXTHDR_CHECK(m
, offset
, sizeof(struct ip6_frag
), IPPROTO_DONE
);
173 ip6f
= (struct ip6_frag
*)((caddr_t
)ip6
+ offset
);
175 IP6_EXTHDR_GET(ip6f
, struct ip6_frag
*, m
, offset
, sizeof(*ip6f
));
181 #ifdef IN6_IFSTAT_STRICT
182 /* find the destination interface of the packet. */
183 dst
= (struct sockaddr_in6
*)&ro
.ro_dst
;
185 (!(ro
.ro_rt
->rt_flags
& RTF_UP
) ||
186 !IN6_ARE_ADDR_EQUAL(&dst
->sin6_addr
, &ip6
->ip6_dst
))) {
190 if (ro
.ro_rt
== NULL
) {
191 bzero(dst
, sizeof(*dst
));
192 dst
->sin6_family
= AF_INET6
;
193 dst
->sin6_len
= sizeof(struct sockaddr_in6
);
194 dst
->sin6_addr
= ip6
->ip6_dst
;
196 rtalloc((struct route
*)&ro
);
197 if (ro
.ro_rt
!= NULL
&& ro
.ro_rt
->rt_ifa
!= NULL
)
198 dstifp
= ((struct in6_ifaddr
*)ro
.ro_rt
->rt_ifa
)->ia_ifp
;
200 /* we are violating the spec, this is not the destination interface */
201 if (m
->m_flags
& M_PKTHDR
)
202 dstifp
= m
->m_pkthdr
.rcvif
;
205 /* jumbo payload can't contain a fragment header */
206 if (ip6
->ip6_plen
== 0) {
207 icmp6_error(m
, ICMP6_PARAM_PROB
, ICMP6_PARAMPROB_HEADER
, offset
);
208 in6_ifstat_inc(dstifp
, ifs6_reass_fail
);
213 * check whether fragment packet's fragment length is
214 * multiple of 8 octets.
215 * sizeof(struct ip6_frag) == 8
216 * sizeof(struct ip6_hdr) = 40
218 if ((ip6f
->ip6f_offlg
& IP6F_MORE_FRAG
) &&
219 (((ntohs(ip6
->ip6_plen
) - offset
) & 0x7) != 0)) {
220 icmp6_error(m
, ICMP6_PARAM_PROB
,
221 ICMP6_PARAMPROB_HEADER
,
222 offsetof(struct ip6_hdr
, ip6_plen
));
223 in6_ifstat_inc(dstifp
, ifs6_reass_fail
);
227 ip6stat
.ip6s_fragments
++;
228 in6_ifstat_inc(dstifp
, ifs6_reass_reqd
);
230 /* offset now points to data portion */
231 offset
+= sizeof(struct ip6_frag
);
233 frag6_doing_reass
= 1;
236 * Enforce upper bound on number of fragments.
237 * If maxfrag is 0, never accept fragments.
238 * If maxfrag is -1, accept all fragments without limitation.
240 if (ip6_maxfrags
< 0)
242 else if (frag6_nfrags
>= (u_int
)ip6_maxfrags
)
245 for (q6
= ip6q
.ip6q_next
; q6
!= &ip6q
; q6
= q6
->ip6q_next
)
246 if (ip6f
->ip6f_ident
== q6
->ip6q_ident
&&
247 IN6_ARE_ADDR_EQUAL(&ip6
->ip6_src
, &q6
->ip6q_src
) &&
248 IN6_ARE_ADDR_EQUAL(&ip6
->ip6_dst
, &q6
->ip6q_dst
))
253 * the first fragment to arrive, create a reassembly queue.
258 * Enforce upper bound on number of fragmented packets
259 * for which we attempt reassembly;
260 * If maxfrag is 0, never accept fragments.
261 * If maxfrag is -1, accept all fragments without limitation.
263 if (ip6_maxfragpackets
< 0)
265 else if (frag6_nfragpackets
>= (u_int
)ip6_maxfragpackets
)
267 frag6_nfragpackets
++;
268 q6
= (struct ip6q
*)kmalloc(sizeof(struct ip6q
), M_FTABLE
,
273 frag6_insque(q6
, &ip6q
);
275 /* ip6q_nxt will be filled afterwards, from 1st fragment */
276 q6
->ip6q_down
= q6
->ip6q_up
= (struct ip6asfrag
*)q6
;
278 q6
->ip6q_nxtp
= (u_char
*)nxtp
;
280 q6
->ip6q_ident
= ip6f
->ip6f_ident
;
281 q6
->ip6q_arrive
= 0; /* Is it used anywhere? */
282 q6
->ip6q_ttl
= IPV6_FRAGTTL
;
283 q6
->ip6q_src
= ip6
->ip6_src
;
284 q6
->ip6q_dst
= ip6
->ip6_dst
;
285 q6
->ip6q_unfrglen
= -1; /* The 1st fragment has not arrived. */
290 * If it's the 1st fragment, record the length of the
291 * unfragmentable part and the next header of the fragment header.
293 fragoff
= ntohs(ip6f
->ip6f_offlg
& IP6F_OFF_MASK
);
295 q6
->ip6q_unfrglen
= offset
- sizeof(struct ip6_hdr
)
296 - sizeof(struct ip6_frag
);
297 q6
->ip6q_nxt
= ip6f
->ip6f_nxt
;
301 * Check that the reassembled packet would not exceed 65535 bytes
303 * If it would exceed, discard the fragment and return an ICMP error.
305 frgpartlen
= sizeof(struct ip6_hdr
) + ntohs(ip6
->ip6_plen
) - offset
;
306 if (q6
->ip6q_unfrglen
>= 0) {
307 /* The 1st fragment has already arrived. */
308 if (q6
->ip6q_unfrglen
+ fragoff
+ frgpartlen
> IPV6_MAXPACKET
) {
309 icmp6_error(m
, ICMP6_PARAM_PROB
, ICMP6_PARAMPROB_HEADER
,
310 offset
- sizeof(struct ip6_frag
) +
311 offsetof(struct ip6_frag
, ip6f_offlg
));
312 frag6_doing_reass
= 0;
313 return (IPPROTO_DONE
);
316 else if (fragoff
+ frgpartlen
> IPV6_MAXPACKET
) {
317 icmp6_error(m
, ICMP6_PARAM_PROB
, ICMP6_PARAMPROB_HEADER
,
318 offset
- sizeof(struct ip6_frag
) +
319 offsetof(struct ip6_frag
, ip6f_offlg
));
320 frag6_doing_reass
= 0;
321 return (IPPROTO_DONE
);
324 * If it's the first fragment, do the above check for each
325 * fragment already stored in the reassembly queue.
328 for (af6
= q6
->ip6q_down
; af6
!= (struct ip6asfrag
*)q6
;
330 af6dwn
= af6
->ip6af_down
;
332 if (q6
->ip6q_unfrglen
+ af6
->ip6af_off
+ af6
->ip6af_frglen
>
334 struct mbuf
*merr
= IP6_REASS_MBUF(af6
);
335 struct ip6_hdr
*ip6err
;
336 int erroff
= af6
->ip6af_offset
;
338 /* dequeue the fragment. */
340 kfree(af6
, M_FTABLE
);
342 /* adjust pointer. */
343 ip6err
= mtod(merr
, struct ip6_hdr
*);
346 * Restore source and destination addresses
347 * in the erroneous IPv6 header.
349 ip6err
->ip6_src
= q6
->ip6q_src
;
350 ip6err
->ip6_dst
= q6
->ip6q_dst
;
352 icmp6_error(merr
, ICMP6_PARAM_PROB
,
353 ICMP6_PARAMPROB_HEADER
,
354 erroff
- sizeof(struct ip6_frag
) +
355 offsetof(struct ip6_frag
, ip6f_offlg
));
360 ip6af
= (struct ip6asfrag
*)kmalloc(sizeof(struct ip6asfrag
), M_FTABLE
,
364 ip6af
->ip6af_head
= ip6
->ip6_flow
;
365 ip6af
->ip6af_len
= ip6
->ip6_plen
;
366 ip6af
->ip6af_nxt
= ip6
->ip6_nxt
;
367 ip6af
->ip6af_hlim
= ip6
->ip6_hlim
;
368 ip6af
->ip6af_mff
= ip6f
->ip6f_offlg
& IP6F_MORE_FRAG
;
369 ip6af
->ip6af_off
= fragoff
;
370 ip6af
->ip6af_frglen
= frgpartlen
;
371 ip6af
->ip6af_offset
= offset
;
372 IP6_REASS_MBUF(ip6af
) = m
;
375 af6
= (struct ip6asfrag
*)q6
;
380 * Find a segment which begins after this one does.
382 for (af6
= q6
->ip6q_down
; af6
!= (struct ip6asfrag
*)q6
;
383 af6
= af6
->ip6af_down
)
384 if (af6
->ip6af_off
> ip6af
->ip6af_off
)
388 * RFC 5722: Drop overlapping fragments
390 if (af6
->ip6af_up
!= (struct ip6asfrag
*)q6
) {
391 i
= af6
->ip6af_up
->ip6af_off
+ af6
->ip6af_up
->ip6af_frglen
394 kfree(ip6af
, M_FTABLE
);
398 if (af6
!= (struct ip6asfrag
*)q6
) {
399 i
= (ip6af
->ip6af_off
+ ip6af
->ip6af_frglen
) - af6
->ip6af_off
;
401 kfree(ip6af
, M_FTABLE
);
409 * Stick new segment in its place;
410 * check for complete reassembly.
411 * Move to front of packet queue, as we are
412 * the most recently active fragmented packet.
414 frag6_enq(ip6af
, af6
->ip6af_up
);
418 if (q6
!= ip6q
.ip6q_next
) {
420 frag6_insque(q6
, &ip6q
);
424 for (af6
= q6
->ip6q_down
; af6
!= (struct ip6asfrag
*)q6
;
425 af6
= af6
->ip6af_down
) {
426 if (af6
->ip6af_off
!= next
) {
427 frag6_doing_reass
= 0;
430 next
+= af6
->ip6af_frglen
;
432 if (af6
->ip6af_up
->ip6af_mff
) {
433 frag6_doing_reass
= 0;
438 * Reassembly is complete; concatenate fragments.
440 ip6af
= q6
->ip6q_down
;
441 t
= m
= IP6_REASS_MBUF(ip6af
);
442 af6
= ip6af
->ip6af_down
;
444 while (af6
!= (struct ip6asfrag
*)q6
) {
445 af6dwn
= af6
->ip6af_down
;
449 t
->m_next
= IP6_REASS_MBUF(af6
);
450 m_adj(t
->m_next
, af6
->ip6af_offset
);
451 kfree(af6
, M_FTABLE
);
455 /* adjust offset to point where the original next header starts */
456 offset
= ip6af
->ip6af_offset
- sizeof(struct ip6_frag
);
457 kfree(ip6af
, M_FTABLE
);
458 ip6
= mtod(m
, struct ip6_hdr
*);
459 ip6
->ip6_plen
= htons((u_short
)next
+ offset
- sizeof(struct ip6_hdr
));
460 ip6
->ip6_src
= q6
->ip6q_src
;
461 ip6
->ip6_dst
= q6
->ip6q_dst
;
464 *q6
->ip6q_nxtp
= (u_char
)(nxt
& 0xff);
468 * Delete frag6 header with as a few cost as possible.
470 if (offset
< m
->m_len
) {
471 bcopy((caddr_t
)ip6
, (caddr_t
)ip6
+ sizeof(struct ip6_frag
),
473 m
->m_data
+= sizeof(struct ip6_frag
);
474 m
->m_len
-= sizeof(struct ip6_frag
);
476 /* this comes with no copy if the boundary is on cluster */
477 if ((t
= m_split(m
, offset
, M_NOWAIT
)) == NULL
) {
479 frag6_nfrags
-= q6
->ip6q_nfrag
;
481 frag6_nfragpackets
--;
484 m_adj(t
, sizeof(struct ip6_frag
));
489 * Store NXT to the original.
492 char *prvnxtp
= ip6_get_prevhdr(m
, offset
); /* XXX */
497 frag6_nfrags
-= q6
->ip6q_nfrag
;
499 frag6_nfragpackets
--;
501 if (m
->m_flags
& M_PKTHDR
) { /* Isn't it always true? */
503 for (t
= m
; t
; t
= t
->m_next
)
505 m
->m_pkthdr
.len
= plen
;
508 ip6stat
.ip6s_reassembled
++;
509 in6_ifstat_inc(dstifp
, ifs6_reass_ok
);
512 * Reassembly complete, return the next protocol.
513 * Be sure to clear M_HASH to force the packet
514 * to be re-characterized.
516 m
->m_flags
&= ~M_HASH
;
521 frag6_doing_reass
= 0;
525 in6_ifstat_inc(dstifp
, ifs6_reass_fail
);
526 ip6stat
.ip6s_fragdropped
++;
528 frag6_doing_reass
= 0;
533 * Free a fragment reassembly header and all
534 * associated datagrams.
537 frag6_freef(struct ip6q
*q6
)
539 struct ip6asfrag
*af6
, *down6
;
541 for (af6
= q6
->ip6q_down
; af6
!= (struct ip6asfrag
*)q6
;
543 struct mbuf
*m
= IP6_REASS_MBUF(af6
);
545 down6
= af6
->ip6af_down
;
549 * Return ICMP time exceeded error for the 1st fragment.
550 * Just free other fragments.
552 if (af6
->ip6af_off
== 0) {
556 ip6
= mtod(m
, struct ip6_hdr
*);
558 /* restoure source and destination addresses */
559 ip6
->ip6_src
= q6
->ip6q_src
;
560 ip6
->ip6_dst
= q6
->ip6q_dst
;
562 icmp6_error(m
, ICMP6_TIME_EXCEEDED
,
563 ICMP6_TIME_EXCEED_REASSEMBLY
, 0);
566 kfree(af6
, M_FTABLE
);
569 frag6_nfrags
-= q6
->ip6q_nfrag
;
571 frag6_nfragpackets
--;
575 * Put an ip fragment on a reassembly chain.
576 * Like insque, but pointers in middle of structure.
579 frag6_enq(struct ip6asfrag
*af6
, struct ip6asfrag
*up6
)
582 af6
->ip6af_down
= up6
->ip6af_down
;
583 up6
->ip6af_down
->ip6af_up
= af6
;
584 up6
->ip6af_down
= af6
;
588 * To frag6_enq as remque is to insque.
591 frag6_deq(struct ip6asfrag
*af6
)
593 af6
->ip6af_up
->ip6af_down
= af6
->ip6af_down
;
594 af6
->ip6af_down
->ip6af_up
= af6
->ip6af_up
;
598 frag6_insque(struct ip6q
*new, struct ip6q
*old
)
600 new->ip6q_prev
= old
;
601 new->ip6q_next
= old
->ip6q_next
;
602 old
->ip6q_next
->ip6q_prev
= new;
603 old
->ip6q_next
= new;
607 frag6_remque(struct ip6q
*p6
)
609 p6
->ip6q_prev
->ip6q_next
= p6
->ip6q_next
;
610 p6
->ip6q_next
->ip6q_prev
= p6
->ip6q_prev
;
614 * IPv6 reassembling timer processing;
615 * if a timer expires on a reassembly
619 frag6_slowtimo_dispatch(netmsg_t nmsg
)
627 netisr_replymsg(&nmsg
->base
, 0);
630 frag6_doing_reass
= 1;
633 while (q6
!= &ip6q
) {
636 if (q6
->ip6q_prev
->ip6q_ttl
== 0) {
637 ip6stat
.ip6s_fragtimeout
++;
638 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
639 frag6_freef(q6
->ip6q_prev
);
643 * If we are over the maximum number of fragments
644 * (due to the limit being lowered), drain off
645 * enough to get down to the new limit.
647 while (frag6_nfragpackets
> (u_int
)ip6_maxfragpackets
&&
649 ip6stat
.ip6s_fragoverflow
++;
650 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
651 frag6_freef(ip6q
.ip6q_prev
);
653 frag6_doing_reass
= 0;
657 * Routing changes might produce a better route than we last used;
658 * make sure we notice eventually, even if forwarding only for one
659 * destination and the cache is never replaced.
661 if (ip6_forward_rt
.ro_rt
) {
662 RTFREE(ip6_forward_rt
.ro_rt
);
663 ip6_forward_rt
.ro_rt
= NULL
;
665 if (ipsrcchk_rt
.ro_rt
) {
666 RTFREE(ipsrcchk_rt
.ro_rt
);
667 ipsrcchk_rt
.ro_rt
= NULL
;
670 callout_reset(&frag6_slowtimo_ch
, FRAG6_SLOWTIMO
, frag6_slowtimo
, NULL
);
674 frag6_slowtimo(void *dummy __unused
)
676 struct netmsg_base
*nmsg
= &frag6_slowtimo_nmsg
;
678 KKASSERT(mycpuid
== 0);
681 if (nmsg
->lmsg
.ms_flags
& MSGF_DONE
)
682 netisr_sendmsg_oncpu(nmsg
);
687 * Drain off all datagram fragments.
690 frag6_drain_oncpu(void)
695 if (frag6_doing_reass
)
697 while (ip6q
.ip6q_next
!= &ip6q
) {
698 ip6stat
.ip6s_fragdropped
++;
699 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
700 frag6_freef(ip6q
.ip6q_next
);
705 frag6_drain_dispatch(netmsg_t nmsg
)
711 netisr_replymsg(&nmsg
->base
, 0);
719 frag6_drain_ipi(void *dummy __unused
)
721 struct netmsg_base
*nmsg
= &frag6_drain_nmsg
;
723 KKASSERT(mycpuid
== 0);
726 if (nmsg
->lmsg
.ms_flags
& MSGF_DONE
)
727 netisr_sendmsg_oncpu(nmsg
);
740 if (!frag6_nfrags
|| frag6_draining
) {
741 /* No fragments or is draining; done. */
747 lwkt_send_ipiq_bycpu(0, frag6_drain_ipi
, NULL
);