/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>
#define FL_MIN_LINGER	6	/* Minimal linger. Set to the 6 s specified
				   in the old IPv6 RFC; a reasonable value.
				 */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);
/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Lock protecting the per-socket flowlabel lists */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);
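/*
 * Lookup model used below: both the global hash table fl_ht[] and each
 * socket's ipv6_fl_list are walked under rcu_read_lock_bh().  Writers
 * serialize on ip6_fl_lock (hash table and GC state) or ip6_sk_fl_lock
 * (per-socket lists) and publish new entries with rcu_assign_pointer().
 */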
#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}
static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		release_net(fl->fl_net);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}
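/*
 * Garbage collection of unused flow labels.  Runs from a timer that
 * fl_release() re-arms whenever a label's refcount drops to zero; an
 * entry is freed once "now" has passed max(lastuse + linger, expires).
 */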
static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}
static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock(&ip6_fl_lock);
}
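/*
 * Insert a new label into the hash table.  With label == 0 a free label
 * is picked at random; otherwise, if an entry with the requested label
 * was created by somebody else while the lock was dropped, that entry is
 * returned with its refcount raised so the caller can redo the sharing
 * checks.
 */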
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}
/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}
/* Service routines */


/*
 * This is the only difficult place: a flow label enforces equal headers
 * up to and including the routing header, but the user may still supply
 * options following the routing header.
 */
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
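/*
 * Linger/expiry handling: requested times arrive in seconds and are
 * converted to jiffies here.  Values below FL_MIN_LINGER are rounded up,
 * and only CAP_NET_ADMIN may exceed FL_MAX_LINGER.
 */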
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}
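/*
 * Allocate and initialize a new label from an IPV6_FL_A_GET request.
 * Ancillary data following the in6_flowlabel_req in optval is parsed with
 * ip6_datagram_send_ctl(); only non-fragmentable options (hop-by-hop,
 * destination-before-routing, routing header) may be attached to a label.
 */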
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
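/*
 * Enforce the global (FL_MAX_SIZE) and per-socket (FL_MAX_PER_SOCK)
 * limits on the number of flow labels.  CAP_NET_ADMIN may bypass the
 * soft per-socket limits as long as some global room remains.
 */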
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}
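/*
 * IPV6_FLOWLABEL_MGR getsockopt(): report either the label most recently
 * received on the socket (IPV6_FL_F_REMOTE), the reflected label, or the
 * parameters of the label currently attached to this socket.
 */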
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;

			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}
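/*
 * IPV6_FLOWLABEL_MGR setsockopt(): handles IPV6_FL_A_GET (attach to or
 * create a label), IPV6_FL_A_RENEW (extend linger/expiry) and
 * IPV6_FL_A_PUT (detach).  A rough userspace sketch (illustrative only;
 * dst_addr is a placeholder struct in6_addr):
 *
 *	struct in6_flowlabel_req frq = {
 *		.flr_action = IPV6_FL_A_GET,
 *		.flr_flags  = IPV6_FL_F_CREATE,
 *		.flr_share  = IPV6_FL_S_EXCL,
 *		.flr_dst    = dst_addr,
 *	};
 *	setsockopt(fd, SOL_IPV6, IPV6_FLOWLABEL_MGR, &frq, sizeof(frq));
 */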
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;


	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid == fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)
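/*
 * /proc/net/ip6_flowlabel: one line per label, showing the label value,
 * share mode, owner (pid or uid, 0 otherwise), refcount, linger and
 * expiry in seconds, destination address and length of attached options.
 */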
static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}
static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}
static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}
static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}
static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};
static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}
static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}
static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	ip6fl_seq_release,
};
static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif
static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}