ipv6: reassembly: use separate reassembly queues for conntrack and local delivery
net/ipv6/reassembly.c
/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>
struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))
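/*
 * FRAG6_CB() stashes per-fragment reassembly state (the byte offset of
 * the fragment within the original datagram) in skb->cb, after the
 * inet6 parameters that the IPv6 input path already keeps there.
 */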
/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;
int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);
/*
 * Callers should be careful not to use the hash value outside the
 * ipfrag_lock, as doing so could race with ipfrag_hash_rnd being
 * recalculated.
 */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += ip6_frags.rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}
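/*
 * ip6qhashfn() mixes the fragment id and both addresses with
 * ip6_frags.rnd, a random seed that the inet_frag core renews on a
 * secret_interval timer, so remote senders cannot deliberately steer
 * fragment queues into a single hash chain.
 */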
static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);
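/*
 * ip6_frag_match() compares fq->user in addition to <id, saddr,
 * daddr>.  This is what keeps the reassembly queues separate: this
 * file creates queues with user == IP6_DEFRAG_LOCAL_DELIVER, while
 * the netfilter conntrack defragmentation path tags its queues with
 * its own user values, so the two never hand each other fragments.
 */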
/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
				  struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);
/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and someone else) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}
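/*
 * ip6_evictor() runs when reassembly memory in this namespace exceeds
 * ip6frag_high_thresh: inet_frag_evictor() frees whole queues, least
 * recently used first, until usage falls under ip6frag_low_thresh,
 * and every datagram dropped this way counts as a reassembly failure.
 */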
static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
}
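/*
 * ip6_frag_expire() is the per-queue timer handler: the queue has
 * lived for ip6frag_time jiffies without completing.  Per RFC 2460,
 * the ICMPv6 "fragment reassembly time exceeded" error is sent only
 * if the first fragment (offset zero) was actually received.
 */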
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	dev = dev_get_by_index(net, fq->iif);
	if (!dev)
		goto out;

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out;

	/* Use the device on which the LAST ARRIVED segment was received
	 * as the source device.  Do not use the fq->dev pointer directly;
	 * the device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}
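/*
 * fq_find() looks up the reassembly queue for <id, src, dst> on the
 * local-delivery path (hence user = IP6_DEFRAG_LOCAL_DELIVER);
 * inet_frag_find() creates, initializes and hashes in a fresh queue
 * when no match exists yet.
 */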
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
	struct inet6_dev *idev)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = ip6qhashfn(id, src, dst);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		goto oom;

	return container_of(q, struct frag_queue, q);

oom:
	IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
	return NULL;
}
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;
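	/* frag_off carries the 13-bit fragment offset, pre-scaled by 8,
	 * in its upper bits, so masking the low three bits (two reserved
	 * bits plus the M flag) of the host-order value yields the byte
	 * offset directly.  'end' is then that offset plus however much
	 * of the IPv6 payload lies beyond the fragment header.
	 */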
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;
	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front of and behind us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with the
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat the head of the next overlapping fragment
			 * and leave the loop.  The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(fq->q.net, free_it, NULL);
		}
	}
	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);
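	/* Not complete yet.  Touch the queue's LRU position so that
	 * ip6_evictor() reclaims the stalest queues first under memory
	 * pressure.
	 */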
	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
/*
 *	Check if this packet is complete.
 *	Returns 1 on success (datagram handed up, fragment header removed,
 *	IP6CB(head)->nhoff pointing at the current nexthdr field) and
 *	-1 on failure for any reason.
 *
 *	It is called with the fq locked, and the caller must check that the
 *	queue is eligible for reassembly, i.e. it is not COMPLETE,
 *	the last and the first frames have arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmentable part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
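	/* payload_len accounts for the extension headers preceding the
	 * fragment header (head->data currently points just past that
	 * header) plus the complete reassembled data, fq->q.len; the
	 * fragment header itself, removed further down, is subtracted
	 * back out.
	 */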
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}
	/* We have to remove the fragment header from the datagram and to
	 * relocate the header in order to calculate the ICV correctly.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);
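	/* The fragment header is excised by sliding everything in front
	 * of it forward by sizeof(struct frag_hdr) and bumping the
	 * mac/network header offsets to match; just before the memmove(),
	 * the Next Header byte of the preceding header (at nhoff) was
	 * patched with the protocol value the fragment header carried.
	 */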
	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold the redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;
out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net;

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

	/* A jumbo payload option inhibits the fragment header. */
	if (hdr->payload_len == 0) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr)))) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);
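	/* htons(0xFFF9) selects the 13-bit fragment offset (0xFFF8) and
	 * the M (more fragments) flag (0x0001).  If both are zero this is
	 * the only fragment of the datagram, so it can skip reassembly
	 * and proceed as-is, minus the fragment header.
	 */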
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}
	net = dev_net(skb->dev);
	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb->dst));

	if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
			  ip6_dst_idev(skb->dst))) != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
static struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};
#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.ctl_name	= NET_IPV6_IP6FRAG_HIGH_THRESH,
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_LOW_THRESH,
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_TIME,
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.ctl_name	= NET_IPV6_IP6FRAG_SECRET_INTERVAL,
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies
	},
	{ }
};
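/*
 * The thresholds and the timeout live in per-namespace tables (the
 * _ns_ table is duplicated per netns with .data repointed at that
 * namespace's values), whereas ip6frag_secret_interval controls the
 * single global hash seed and so is registered once, through
 * register_net_sysctl_rotable(), as read-only outside the initial
 * namespace.
 */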
static int ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (net != &init_net) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (net != &init_net)
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif
static int ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = 256 * 1024;
	net->ipv6.frags.low_thresh = 192 * 1024;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};
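/*
 * ipv6_frag_init() registers the fragment-header protocol handler,
 * the global sysctl and the per-namespace state, then fills in the
 * inet_frags callbacks before handing ip6_frags to the shared
 * inet_frag core.  The error labels unwind in reverse order of
 * registration.
 */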
int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}
void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}