/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <net/ip6_checksum.h>
struct mr6_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	int			mroute_do_assert;
	int			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};
struct ip6mr_rule {
	struct fib_rule		common;
};
struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);
/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
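/* Lock usage in practice (summary of the scheme above): the packet path, the
 * /proc readers and the counter ioctls take read_lock(&mrt_lock); the
 * configuration paths (mif6_add/mif6_delete, MFC updates, socket init/done)
 * take write_lock_bh(&mrt_lock); only the queue of unresolved entries is
 * guarded by mfc_unres_lock with bottom halves disabled.
 */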
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	struct ip6mr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	int err;

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}
static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}
static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
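/* With CONFIG_IPV6_MROUTE_MULTIPLE_TABLES, multicast routing tables are
 * selected through fib rules of family RTNL_FAMILY_IP6MR (managed with the
 * ops template above), much like unicast policy routing.  A routing daemon
 * can bind its mroute socket to a specific table via the MRT6_TABLE socket
 * option handled in ip6_mroute_setsockopt() below.
 */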
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}
static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ip6mr_free_table(net->ipv6.mrt6);
}
#endif
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}
static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
/*
 *	The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */
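/* Illustrative layout of the two files registered below under /proc/net
 * (values are made up; the exact columns come from the seq_show handlers):
 *
 *   ip6_mr_vif:
 *     Interface BytesIn PktsIn BytesOut PktsOut Flags
 *      0 eth0       1500     10     3000      20 00000
 *
 *   ip6_mr_cache:
 *     Group Origin Iif Pkts Bytes Wrong Oifs
 *     ff0e::1 2001:db8::1 0    10  1500     0  1:1
 */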
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};
static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}
static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mrt->mfc6_cache_array)
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group Origin "
			 "Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}
static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
#endif
/* Service routines creating virtual interfaces: PIMREG */
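/* The PIM register vif ("pim6reg" below, or "pim6reg<table id>" for
 * non-default tables) has no real link layer: every packet the stack
 * transmits on it is handed to the mroute socket as an MRT6MSG_WHOLEPKT
 * upcall so that pim6sd can encapsulate it in a PIM register message.
 * Conversely, pim6_rcv() above decapsulates incoming register messages and
 * feeds the inner packet back through this device.
 */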
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding--;

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
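/* Note: an unresolved entry that pim6sd never resolves (no MRT6_ADD_MFC in
 * response to the MRT6MSG_NOCACHE upcall) is torn down by the timer above
 * roughly 10 seconds after it was queued, together with its pending skbs.
 */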
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
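/* A ttls[] value of 255 means "do not forward on this MIF"; any smaller
 * non-zero value is a scope threshold: ip6_mr_forward() only copies a packet
 * to a MIF when the packet's hop limit is strictly greater than the stored
 * threshold, and [minvif, maxvif) bounds the range that has to be scanned.
 */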
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding++;

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}
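/* Resolved (S,G) entries live in MFC6_LINES hash buckets keyed on the
 * MFC6_HASH(group, origin) value computed above, so a lookup in the
 * forwarding path only walks one short list under read_lock(&mrt_lock).
 */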
/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}
static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */
		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (mrt->mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out.
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
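/* What pim6sd sees for the upcalls generated by ip6mr_cache_report(): for
 * MRT6MSG_WHOLEPKT the struct mrt6msg is pushed in front of the original
 * packet, while for MRT6MSG_NOCACHE/MRT6MSG_WRONGMIF the skb carries a copy
 * of the offending IPv6 header followed by the struct mrt6msg.  Either way
 * the message is delivered on the socket registered with MRT6_INIT, and the
 * daemon typically reacts by installing an MFC entry with MRT6_ADD_MFC
 * (see ip6_mroute_setsockopt() below).
 */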
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;
	LIST_HEAD(list);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, &list);
		}
	}
	unregister_netdevice_many(&list);

	return NOTIFY_DONE;
}
static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip6_mr_vif");
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}
static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip6_mr_cache");
	proc_net_remove(net, "ip6_mr_vif");
#endif
	ip6mr_rules_exit(net);
}
static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
void ip6_mr_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(mrt, i, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	} else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);

			mroute_clean_tables(mrt);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
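/* How a daemon such as pim6sd typically drives this interface (illustrative
 * sketch, not part of this file): it opens a raw ICMPv6 socket and uses the
 * MRT6_* socket options handled below, e.g.
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int v = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &v, sizeof(v));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 *
 * where mif is a struct mif6ctl and mfc a struct mf6cctl from
 * <linux/mroute6.h>; closing the socket (or MRT6_DONE) tears everything down
 * via ip6mr_sk_done().
 */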
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(mrt, &mfc);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (optname) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst)
		goto out_free;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS)
		return -ENOENT;

	if (MIF_EXISTS(mrt, c->mf6c_parent))
		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
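/* Each (S,G) entry is thus reported over rtnetlink as an RTN_MULTICAST route:
 * RTA_IIF carries the parent (upstream) interface, and RTA_MULTIPATH holds
 * one rtnexthop per output MIF whose rtnh_hops field is reused to carry the
 * TTL threshold configured for that interface.
 */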
int ip6mr_get_route(struct net *net,
		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
*mrt
, struct sk_buff
*skb
,
2196 u32 pid
, u32 seq
, struct mfc6_cache
*c
)
2198 struct nlmsghdr
*nlh
;
2201 nlh
= nlmsg_put(skb
, pid
, seq
, RTM_NEWROUTE
, sizeof(*rtm
), NLM_F_MULTI
);
2205 rtm
= nlmsg_data(nlh
);
2206 rtm
->rtm_family
= RTNL_FAMILY_IPMR
;
2207 rtm
->rtm_dst_len
= 128;
2208 rtm
->rtm_src_len
= 128;
2210 rtm
->rtm_table
= mrt
->id
;
2211 NLA_PUT_U32(skb
, RTA_TABLE
, mrt
->id
);
2212 rtm
->rtm_scope
= RT_SCOPE_UNIVERSE
;
2213 rtm
->rtm_protocol
= RTPROT_UNSPEC
;
2216 NLA_PUT(skb
, RTA_SRC
, 16, &c
->mf6c_origin
);
2217 NLA_PUT(skb
, RTA_DST
, 16, &c
->mf6c_mcastgrp
);
2219 if (__ip6mr_fill_mroute(mrt
, skb
, c
, rtm
) < 0)
2220 goto nla_put_failure
;
2222 return nlmsg_end(skb
, nlh
);
2225 nlmsg_cancel(skb
, nlh
);
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).pid,
						      cb->nlh->nlmsg_seq,
						      mfc) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		s_h = 0;
next_table:
		t++;
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}