/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>
struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))
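/* FRAG6_CB() overlays ip6frag_skb_cb on the skb control buffer, so each
 * queued fragment carries its byte offset within the original datagram
 * alongside the usual IPv6 per-skb state.
 */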
/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;
int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);
/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);
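/* The hash mixes the fragment id with both addresses and a random seed
 * (ip6_frags.rnd), so a remote sender cannot aim all of its fragments at
 * a single hash bucket.  It is exported so the netfilter IPv6 reassembly
 * path can hash its queues the same way.
 */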
static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}
int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);
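/* Queues are keyed on (id, saddr, daddr) plus the "user" that created
 * them, so fragments queued by different reassembly contexts are never
 * mixed even when the header fields match.
 */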
void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);
/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}
static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* As the source, use the device on which the LAST segment arrived.
	 * Do not use the fq->dev pointer directly; the device might have
	 * disappeared already.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);

out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		return NULL;

	return container_of(q, struct frag_queue, q);
}
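/* fq_find() looks up the reassembly queue for this (id, src, dst) triple,
 * creating one on the fly if none exists yet.  A NULL return means a new
 * queue could not be allocated.
 */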
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;
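	/* The fragment offset occupies the upper 13 bits of frag_off in
	 * units of 8 octets, so masking off the three low flag bits of the
	 * host-order field yields the byte offset directly.  "end" is that
	 * offset plus everything in this packet past the fragment header.
	 */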
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4:
	 *   When reassembling an IPv6 datagram, if one or more of its
	 *   constituent fragments is determined to be an overlapping
	 *   fragment, the entire datagram (and any constituent fragments,
	 *   including those not yet received) MUST be silently discarded.
	 */

	/* Check for overlap with the preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
		goto discard_fq;

	/* Look for overlap with the succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);
	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);
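	/* Not complete yet: move this queue to the tail of the per-netns
	 * LRU list, so the evictor reclaims the longest-idle queues first.
	 */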
	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 on success; the head skb then holds the reassembled
 *	frame, with IP6CB(head)->nhoff giving the offset of the current
 *	nexthdr field.  Returns -1 on failure.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
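	/* The just-received skb sits at prev->next.  A clone takes its
	 * place in the fragment list, and skb_morph() lets the new head
	 * assume the old head's identity before the old head is freed.
	 */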
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
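	/* payload_len is everything that will follow the fixed IPv6 header
	 * once the fragment header is stripped: the first fragment's
	 * remaining extension headers plus fq->q.len bytes of fragmentable
	 * data.
	 */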
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove the fragment header from the datagram and to
	 * relocate the header in order to calculate the ICV correctly.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n",
		       payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}
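	/* Under memory pressure, evict the oldest incomplete queues before
	 * admitting another fragment.
	 */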
	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
static const struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};
#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}
static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif
static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
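	/* These per-namespace defaults can be tuned at runtime through the
	 * ip6frag_* sysctls registered above.
	 */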
	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}
static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};
int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
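	/* Reseed the hash secret every ten minutes; the inet_frag core
	 * rehashes the existing queues under the new random value.
	 */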
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}
void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}