net/ipv6/reassembly.c

/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>
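
/*
 * Per-fragment reassembly state is stashed in skb->cb alongside the
 * generic inet6_skb_parm area; 'offset' caches the fragment's byte
 * offset within the original datagram.  FRAG6_CB() gives typed access.
 */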
struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))

/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += ip6_frags.rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}
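
/*
 * ip6_hashfn and ip6_frag_match are the callbacks plugged into the
 * shared inet_frags machinery: the former wraps ip6qhashfn() for a
 * generic queue, the latter compares a queue against the
 * (id, saddr, daddr) key that inet_frag_find() looks up.
 */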
static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}
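
/*
 * Memory pressure relief: inet_frag_evictor() drops whole queues,
 * oldest first via the LRU list, and returns how many it dropped;
 * each evicted queue is accounted as a reassembly failure.
 */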
static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
}
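
/*
 * Timer callback, run when a queue has sat longer than ip6frag_time
 * without completing: the queue is killed and, provided the first
 * fragment arrived, an ICMPv6 "fragment reassembly time exceeded"
 * error is returned to the sender.
 */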
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & COMPLETE)
		goto out;

	fq_kill(fq);

	dev = dev_get_by_index(&init_net, fq->iif);
	if (!dev)
		goto out;

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & FIRST_IN) || !fq->q.fragments)
		goto out;

	/*
	 * Use the device on which the LAST fragment arrived as the
	 * source device for the error.  Do not use the fq->dev pointer
	 * directly; the device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}
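
/*
 * Look up the queue for this (id, src, dst) triple, creating it if
 * needed.  inet_frag_find() hands the queue back with a reference
 * already taken, so callers must drop it with fq_put().
 */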
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
	struct inet6_dev *idev)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.src = src;
	arg.dst = dst;
	hash = ip6qhashfn(id, src, dst);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		goto oom;

	return container_of(q, struct frag_queue, q);

oom:
	IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
	return NULL;
}
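
/*
 * Add one fragment to its queue: validate it, trim any byte ranges
 * that overlap fragments already queued, link it into the
 * offset-sorted list, and kick off reassembly once the first and
 * last fragments are in and no holes remain.
 */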
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;

	if (fq->q.last_in & COMPLETE)
		goto err;
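
	/* The fragment offset field already encodes a byte offset (the
	 * 13-bit unit count shifted left by 3), so masking off the low
	 * flag/reserved bits yields the offset directly; 'end' is that
	 * offset plus however much payload follows the fragment header
	 * in this packet.
	 */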
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}
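
	/* For CHECKSUM_COMPLETE skbs, subtract the checksum of the bytes
	 * from the network header up to the end of the fragment header,
	 * since those bytes are about to be pulled off the skb.
	 */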
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(fq->q.net, free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= FIRST_IN;
	}

	if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);
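
	/* Not complete yet: touch the queue's LRU position so the
	 * evictor sees it as recently used.
	 */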
	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason, and 1 on success, with
 *	IP6CB(head)->nhoff left pointing at the current nexthdr field
 *	in the reassembled frame.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);
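
	/* The datagram was completed by the fragment that just arrived,
	 * which may sit anywhere in the list.  A clone takes its place
	 * there, and skb_morph() lets it assume the old head's identity,
	 * so the skb handed upwards is (in effect) the one that arrived
	 * last.
	 */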
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}
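
	/* The nexthdr byte stored in the fragment header is written back
	 * into the preceding header's nexthdr field, then everything in
	 * front of the payload is shifted forward over the 8-byte
	 * fragment header, which disappears from the packet.
	 */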
	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
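
/*
 * Protocol handler for IPPROTO_FRAGMENT, wired up via frag_protocol
 * below: every packet carrying a fragment header lands here.
 */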
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net;

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr)))) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}
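
	/* The htons(0xFFF9) mask in the test below covers the 13-bit
	 * fragment offset plus the M flag; if all of them are clear this
	 * is a fragment header on an unfragmented packet, so just step
	 * over it.
	 */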
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	net = skb->dev->nd_net;
	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb->dst));

	if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
			  ip6_dst_idev(skb->dst))) != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

static struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.ctl_name	= NET_IPV6_IP6FRAG_HIGH_THRESH,
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_LOW_THRESH,
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_TIME,
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_SECRET_INTERVAL,
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies
	},
	{ }
};
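
/*
 * For namespaces other than init_net the table is duplicated so the
 * threshold and timeout entries can point at the per-namespace
 * values; the secret_interval entry stays global and is made
 * read-only there.
 */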
static int ip6_frags_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ctl_table;
	if (net != &init_net) {
		table = kmemdup(table, sizeof(ip6_frags_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
		table[3].mode &= ~0222;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (net != &init_net)
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void ip6_frags_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	kfree(table);
}
#else
static inline int ip6_frags_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(struct net *net)
{
}
#endif

static int ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = 256 * 1024;
	net->ipv6.frags.low_thresh = 192 * 1024;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_sysctl_register(net);
}

static void ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};
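
/*
 * Module init: register the fragment protocol handler and the
 * per-namespace setup above, then describe this protocol's queues to
 * the shared inet_frags core (hash, match, constructor, expiry timer).
 */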
int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	register_pernet_subsys(&ip6_frags_ops);

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}