ipv6: ip6mr: support multiple tables
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <net/ip6_checksum.h>

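/* Multicast routing table state. With multiple-table support each
 * table carries its own vif array, MFC hash, unresolved queue and
 * controlling user-space socket, keyed by table id within a netns.
 */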
struct mr6_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	int			mroute_do_assert;
	int			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     struct mfc6_cache *c, struct rtmsg *rtm);
static void mroute_clean_tables(struct mr6_table *mrt);
static void ipmr_expire_process(unsigned long arg);

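/* With CONFIG_IPV6_MROUTE_MULTIPLE_TABLES the table for a packet is
 * chosen by fib rules (family RTNL_FAMILY_IP6MR); without it all
 * lookups collapse to the single default table in net->ipv6.mrt6.
 */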
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

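/* Pick the multicast routing table for a flow by running the ip6mr
 * fib rules; the matching rule's action hands the table back through
 * struct ip6mr_result.
 */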
static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
			    struct mr6_table **mrt)
{
	struct ip6mr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	int err;

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
		ip6mr_free_table(mrt);
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}

#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ip6mr_free_table(net->ipv6.mrt6);
}
#endif

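/* Look up a table by id, creating and initialising it on first use:
 * empty MFC hash buckets, an empty unresolved queue and an idle
 * expiry timer.
 */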
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

/*
 *	The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mrt->mfc6_cache_array)
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

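/* Receive a PIMv2 REGISTER: validate type, flags and checksum, then
 * decapsulate the inner multicast packet and feed it back into the
 * stack through the register vif of the matching table.
 */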
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr   *encap;
	struct net_device  *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif	= skb->dev->ifindex,
		.mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	skb_dst_drop(skb);
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi fl = {
		.oif		= dev->ifindex,
		.iif		= skb->skb_iif,
		.mark		= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl, &mrt);
	if (err < 0)
		return err;

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding--;

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}

/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

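/* Add a virtual interface to the table. Called under RTNL; the new
 * vif only becomes visible to the data path once v->dev is set under
 * mrt_lock at the end.
 */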
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding++;

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   struct in6_addr *origin,
					   struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (mrt->mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

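/* Device is being unregistered: drop every vif bound to it, in every
 * multicast routing table of the namespace.
 */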
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;
	LIST_HEAD(list);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, &list);
		}
	}
	unregister_netdevice_many(&list);

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip6_mr_vif");
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip6_mr_cache");
	proc_net_remove(net, "ip6_mr_vif");
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(mrt, i, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

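/* Bind sk as the user-space multicast routing daemon socket for this
 * table; only one socket may control a table at a time.
 */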
static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	}
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}

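/* MRT6_DONE or socket close: release the table this socket controls
 * and flush its non-static vifs and cache entries.
 */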
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);

			mroute_clean_tables(mrt);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif	= skb->skb_iif,
		.oif	= skb->dev->ifindex,
		.mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(mrt, &mfc);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi fl;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl = (struct flowi) {
		.oif = vif->link,
		.nl_u = { .ip6_u =
				{ .daddr = ipv6h->daddr, }
		}
	};

	dst = ip6_route_output(net, NULL, &fl);
	if (!dst)
		goto out_free;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

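/* Map a device to its vif index in this table; returns -1 if the
 * device is not a configured vif.
 */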
static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}

static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}

/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif	= skb->dev->ifindex,
		.mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl, &mrt);
	if (err < 0)
		return err;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}

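/* Fill an rtnetlink reply for a resolved cache entry: RTA_IIF for the
 * parent vif plus an RTA_MULTIPATH nest with one rtnexthop per
 * forwarding vif.
 */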
static int
ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
		  struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent > MAXMIFS)
		return -ENOENT;

	if (MIF_EXISTS(mrt, c->mf6c_parent))
		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

int ip6mr_get_route(struct net *net,
		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}