/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>

struct sock *mroute6_socket;

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct mif_device vif6_table[MAXMIFS];	/* Devices */
static int maxvif;

#define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)

static int mroute_do_assert;			/* Set in PIM assert */
#ifdef CONFIG_IPV6_PIMSM_V2
static int mroute_do_pim;
#else
#define mroute_do_pim 0
#endif

static struct mfc6_cache *mfc6_cache_array[MFC6_LINES];	/* Forwarding cache */

static struct mfc6_cache *mfc_unres_queue;	/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;	/* Size of unresolved */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert);
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IPV6_PIMSM_V2
static struct inet6_protocol pim6_protocol;
#endif

static struct timer_list ipmr_expire_timer;

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct mfc6_cache **cache;
	int ct;
};

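/*
 * The MFC iterator walks two collections in turn: the hash table of
 * resolved entries (under the read side of mrt_lock) and then the
 * queue of unresolved entries (under mfc_unres_lock).  it->cache
 * records which collection we are positioned in, so that seq_stop()
 * can drop whichever lock is still held.
 */
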
static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc6_cache *mfc;

	it->cache = mfc6_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
		for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

/*
 *	The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&mrt_lock);
	return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN);
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		return &vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ip6mr_vif_seq_ops,
				sizeof(struct ipmr_vif_iter));
}

static struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN);
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc6_cache_array);

	while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
		mfc = mfc6_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc6_cache_array)
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq,
			   NIP6_FMT " " NIP6_FMT " %-3d %8ld %8ld %8ld",
			   NIP6(mfc->mf6c_mcastgrp), NIP6(mfc->mf6c_origin),
			   mfc->mf6c_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_mfc_seq_ops,
				sizeof(struct ipmr_mfc_iter));
}

static struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2
static int reg_vif_num = -1;

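/*
 * Handle an incoming PIM Register message: validate the PIM header
 * and checksum, make sure the encapsulated packet really is IPv6
 * multicast, then decapsulate it and feed it back into the stack
 * through the pim6reg pseudo-device, as if it had arrived there.
 */
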
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IPV6);	/* decapsulated payload is IPv6 */
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	((struct net_device_stats *)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats *)netdev_priv(reg_dev))->rx_packets++;
	skb->dst = NULL;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}

static struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

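/*
 * Note that pim6_protocol is not registered unconditionally: it is
 * added with inet6_add_protocol() only when the daemon enables PIM
 * via the MRT6_PIM socket option, and removed again when PIM is
 * turned off (see ip6_mroute_setsockopt() below).
 */
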
/* Service routines creating virtual interfaces: PIMREG */

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	((struct net_device_stats *)netdev_priv(dev))->tx_bytes += skb->len;
	((struct net_device_stats *)netdev_priv(dev))->tx_packets++;
	ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
{
	return (struct net_device_stats *)netdev_priv(dev);
}

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->hard_start_xmit	= reg_vif_xmit;
	dev->get_stats		= reg_vif_get_stats;
	dev->destructor		= free_netdev;
}

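/*
 * The pim6reg MTU above leaves room for the encapsulation overhead a
 * register adds on a standard 1500-byte link: an outer IPv6 header
 * plus the 8-byte PIM register header.
 */
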
static struct net_device *ip6mr_reg_vif(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct net_device_stats), "pim6reg",
			   reg_vif_setup);

	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(int vifi)
{
	struct mif_device *v;
	struct net_device *dev;

	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi + 1 == maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mfc6_cache *c)
{
	struct sk_buff *skb;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}

/* Single timer process for all the unresolved queue. */

static void ipmr_do_expire_process(unsigned long dummy)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, **cp;

	cp = &mfc_unres_queue;

	while ((c = *cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;
		ip6mr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(unsigned long dummy)
{
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len))
		ipmr_do_expire_process(dummy);

	spin_unlock(&mfc_unres_lock);
}

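/*
 * The timer handler must not spin on mfc_unres_lock (the data path
 * takes it with spin_lock_bh from packet processing), so when the
 * trylock fails it simply re-arms itself one jiffy into the future
 * and tries again.
 */
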
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < maxvif; vifi++) {
		if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

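/*
 * minvif/maxvif bracket the interfaces that actually appear in the
 * oif list, so the forwarding loop in ip6_mr_forward() only has to
 * scan [minvif, maxvif) instead of all MAXMIFS slots.
 */
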
static int mif6_add(struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &vif6_table[vifi];
	struct net_device *dev;

	/* Is vif busy ? */
	if (MIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		break;
#endif
	case 0:
		dev = dev_get_by_index(vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		break;
	default:
		return -EINVAL;
	}

	dev_set_allmulti(dev, 1);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi + 1 > maxvif)
		maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

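/*
 * All fields of the mif_device above are filled in before v->dev is
 * set under mrt_lock; readers treat a non-NULL dev (MIF_EXISTS) as
 * the sign that the entry is valid, so the device pointer must be
 * written last.
 */
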
static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	for (c = mfc6_cache_array[line]; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

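/*
 * Resolved entries are only created from process context (user space
 * installing a route), hence GFP_KERNEL; unresolved entries are
 * created from the packet path, hence GFP_ATOMIC.
 */
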
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+ sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */

		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->dst = dst_clone(pkt->dst);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb_pull(skb, sizeof(struct ipv6hdr));
	}

	if (mroute6_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
{
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = mfc_unres_queue; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		if ((err = ip6mr_cache_report(skb, mifi, MRT6MSG_NOCACHE)) < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		ipmr_do_expire_process(1);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

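/*
 * Both caps above keep an unresponsive daemon from pinning unbounded
 * skb memory: at most 10 unresolved entries at a time, and at most 4
 * queued packets per entry (the qlen > 3 test).
 */
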
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, **cp;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	v = &vif6_table[0];
	for (ct = 0; ct < maxvif; ct++, v++) {
		if (v->dev == dev)
			mif6_delete(ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

void __init ip6_mr_init(void)
{
	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		panic("cannot allocate ip6_mrt_cache");

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	register_netdevice_notifier(&ip6_mr_notifier);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("ip6_mr_vif", 0, &ip6mr_vif_fops);
	proc_net_fops_create("ip6_mr_cache", 0, &ip6mr_mfc_fops);
#endif
}

static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
{
	int line;
	struct mfc6_cache *uc, *c, **cp;
	unsigned char ttls[MAXMIFS];
	int i;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc6_cache_array[line];
	mfc6_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
	     cp = &uc->next) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ip6mr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < maxvif; i++) {
		if (!(vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
		struct mfc6_cache *c, **cp;

		cp = &mfc6_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags & MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc6_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

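/*
 * Note that entries marked VIFF_STATIC / MFC_STATIC are deliberately
 * left in place here, so routes configured as static survive the
 * daemon closing its socket.
 */
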
static int ip6mr_sk_init(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mroute6_socket == NULL))
		mroute6_socket = sk;
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	if (sk == mroute6_socket) {
		write_lock_bh(&mrt_lock);
		mroute6_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	} else
		err = -EACCES;
	rtnl_unlock();

	return err;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;

	if (optname != MRT6_INIT) {
		if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(&vif, sk == mroute6_socket);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mifi);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(&mfc);
		else
			ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
			if (mroute_do_pim)
				ret = inet6_add_protocol(&pim6_protocol,
							 IPPROTO_PIM);
			else
				ret = inet6_del_protocol(&pim6_protocol,
							 IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

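/*
 * For reference, a daemon drives this interface roughly as follows
 * (a minimal sketch only; error handling omitted, ifindex assumed):
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl mc = { .mif6c_mifi = 0, .mif6c_pifi = ifindex };
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *	...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */
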
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;

	switch (optname) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &vif6_table[vr.mifi];
		if (MIF_EXISTS(vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi fl;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		((struct net_device_stats *)netdev_priv(vif->dev))->tx_bytes += skb->len;
		((struct net_device_stats *)netdev_priv(vif->dev))->tx_packets++;
		ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);
		kfree_skb(skb);
		return 0;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl = (struct flowi) {
		.oif = vif->link,
		.nl_u = { .ip6_u =
				{ .daddr = ipv6h->daddr, }
		}
	};

	dst = ip6_route_output(NULL, &fl);
	if (!dst)
		goto out_free;

	dst_release(skb->dst);
	skb->dst = dst;

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct = maxvif - 1; ct >= 0; ct--) {
		if (vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	vif6_table[vif].pkt_in++;
	vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}

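/*
 * The loop above clones the skb for every matching vif except the
 * last one, which consumes the original; this saves one copy in the
 * common single-oif case.
 */
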
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}

static int
ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif6_table[c->mf6c_parent].dev;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

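/*
 * The oif list is encoded the same way the IPv4 code dumps a
 * multicast route: an RTA_MULTIPATH attribute holding one rtnexthop
 * per outgoing interface, with the vif threshold in rtnh_hops.
 */
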
int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}