/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>
#define FL_MIN_LINGER	6	/* Minimal linger. It is set to 6sec specified
				   in old IPv6 RFC. Well, it was a reasonable value.
				 */
#define FL_MAX_LINGER	60	/* Maximal linger timeout */
#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

static DEFINE_SPINLOCK(ip6_sk_fl_lock);
#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}
static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		release_net(fl->fl_net);
		kfree(fl->opt);
	}
	kfree_rcu(fl, rcu);
}
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}
static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched)
		mod_timer(&ip6_fl_gc_timer, sched);
	spin_unlock(&ip6_fl_lock);
}
static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock(&ip6_fl_lock);
}
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}
/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
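
/*
 * Usage sketch (illustrative, not part of this file): a sendmsg()-style
 * caller that finds a flow label already attached to the socket and
 * takes a reference before using it.  "fl6_flowlabel" stands for the
 * flow label bits taken from the destination address or ancillary data.
 *
 *	struct ip6_flowlabel *flowlabel = NULL;
 *
 *	if (fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
 *		flowlabel = fl6_sock_lookup(sk, fl6_flowlabel);
 *		if (flowlabel == NULL)
 *			return -EINVAL;
 *	}
 *	...
 *	if (flowlabel)
 *		fl6_sock_release(flowlabel);
 */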
void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}
/* Service routines */


/*
   This is the only difficult place: the flowlabel enforces equal headers
   up to and including the routing header, but the user may supply options
   following the routing header.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
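
/*
 * Usage sketch (illustrative, not part of this file): a transmit path
 * that has both a bound flow label and per-call options is expected to
 * merge them into caller-provided scratch space before building the
 * packet; fl6_merge_options() never allocates.  "fopt" stands for the
 * per-call options and "flowlabel" for an ip6_flowlabel reference
 * obtained e.g. via fl6_sock_lookup().
 *
 *	struct ipv6_txoptions opt_space;
 *	struct ipv6_txoptions *opt = fopt;
 *
 *	if (flowlabel)
 *		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 */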
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	return 0;
}
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}
static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
{
	if (h1 == h2)
		return false;
	if (h1 == NULL || h2 == NULL)
		return true;
	if (h1->hdrlen != h2->hdrlen)
		return true;
	return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
}
static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
{
	if (o1 == o2)
		return false;
	if (o1 == NULL || o2 == NULL)
		return true;
	if (o1->opt_nflen != o2->opt_nflen)
		return true;
	if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
		return true;
	if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
		return true;
	if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
		return true;
	return false;
}
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl, **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;


	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				/* Refuse reuse if the sharing mode or owner
				 * of the existing label does not match.
				 */
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -EINVAL;
				if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
				    ipv6_opt_cmp(fl1->opt, fl->opt))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	return err;
}
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)
static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}
static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}
static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			pos--;
	return pos ? NULL : fl;
}
static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}
static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}
static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};
static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}
static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}
static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	ip6fl_seq_release,
};
static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif
static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};
int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}