/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif
static struct sock *mroute_socket;
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);
/*
 *	Multicast router control variables
 */

static struct vif_device vif_table[MAXVIFS];	/* Devices		*/
static int maxvif;

#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
static int mroute_do_assert;			/* Set in PIM assert	*/
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES];	/* Forwarding cache	*/

static struct mfc_cache *mfc_unres_queue;	/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;	/* Size of unresolved	*/

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries
   is protected with the strong spinlock mfc_unres_lock.

   In this case the data path is entirely free of exclusive locks.
 */
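/*
 * Illustrative pattern (sketch only, restating the rule above in code):
 * data-path readers take mrt_lock shared, configuration paths take it
 * exclusive, e.g.
 *
 *	read_lock(&mrt_lock);			// data path (softirq)
 *	c = ipmr_cache_find(saddr, daddr);
 *	read_unlock(&mrt_lock);
 *
 *	write_lock_bh(&mrt_lock);		// process-context update
 *	mfc_cache_array[line] = c;
 *	write_unlock_bh(&mrt_lock);
 */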
static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	dev = __dev_get_by_name(&init_net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}
static
struct net_device *ipmr_new_tunnel(struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(&init_net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else
			err = -EOPNOTSUPP;

		dev = NULL;

		if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
}
static struct net_device *ipmr_reg_vif(void)
{
	struct net_device *dev;
	struct in_device *in_dev;

	dev = alloc_netdev(0, "pimreg", reg_vif_setup);
	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
/*
 *	Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(int vifi, int notify)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi+1 == maxvif) {
		int tmp;
		for (tmp=vifi-1; tmp>=0; tmp--) {
			if (VIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}
/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)
{
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, **cp;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len) == 0)
		goto out;

	now = jiffies;
	expires = 10*HZ;
	cp = &mfc_unres_queue;

	while ((c=*cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;

		ipmr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi=0; vifi<maxvif; vifi++) {
		if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
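/*
 * How the bounds computed above are consumed (sketch; this mirrors the
 * loop shape ip_mr_forward() uses further down, shown here only to make
 * the minvif/maxvif convention explicit):
 *
 *	for (ct = cache->mfc_un.res.maxvif - 1;
 *	     ct >= cache->mfc_un.res.minvif; ct--)
 *		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct])
 *			... queue the packet for transmission on vif ct ...
 */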
static int vif_add(struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case 0:
		dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
		return -EADDRNOTAVAIL;
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags&VIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi+1 > maxvif)
		maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
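/*
 * Userspace sketch (illustrative only, not part of this file): a routing
 * daemon reaches vif_add() via setsockopt() on its mrouted socket. The
 * index and address below are example values; error handling omitted.
 *
 *	struct vifctl vc;
 *	memset(&vc, 0, sizeof(vc));
 *	vc.vifc_vifi = 0;
 *	vc.vifc_flags = 0;
 *	vc.vifc_threshold = 1;
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 */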
static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	for (c=mfc_cache_array[line]; c; c = c->next) {
		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
			break;
	}
	return c;
}
/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 *	Play the pending entries through our router
	 */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = (skb_tail_pointer(skb) -
						  (u8 *)nlh);
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			ip_mr_forward(skb, c, 0);
	}
}
/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */
		skb->network_header = skb->tail;
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb->dst = dst_clone(pkt->dst);

		/*
		 *	Add our header
		 */
		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type =
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	if (mroute_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to mrouted
	 */
	if ((ret = sock_queue_rcv_skb(mroute_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */
		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen>3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
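/*
 * On the other end (userspace sketch, illustrative only): mrouted reads
 * these IGMPMSG_NOCACHE upcalls from its raw IGMP socket. A protocol
 * field of 0 in the leading IP header marks a kernel message rather than
 * real IGMP. Error handling omitted.
 *
 *	char buf[1500];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	struct igmpmsg *m = (struct igmpmsg *)buf;
 *	if (n > 0 && m->im_mbz == 0 && m->im_msgtype == IGMPMSG_NOCACHE)
 *		... resolve (m->im_src, m->im_dst), then MRT_ADD_MFC ...
 */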
/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}
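/*
 * Userspace sketch (illustrative only): the (S,G) entry handled above is
 * installed like this; addresses are example values. A TTL threshold of
 * 255 (the memset default) keeps a vif out of the oif set.
 *
 *	struct mfcctl mc;
 *	memset(&mc, 0, sizeof(mc));
 *	mc.mfcc_origin.s_addr   = inet_addr("192.0.2.10");
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.1.1");
 *	mc.mfcc_parent = 0;		// expected incoming vif
 *	mc.mfcc_ttls[1] = 1;		// forward on vif 1
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 */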
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i, 0);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0; i<MFC_LINES; i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == mroute_socket) {
		IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;

		write_lock_bh(&mrt_lock);
		mroute_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	if (optname != MRT_INIT) {
		if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen != sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (mroute_socket) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			write_lock_bh(&mrt_lock);
			mroute_socket = sk;
			write_unlock_bh(&mrt_lock);

			IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != mroute_socket)
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(&vif, sk==mroute_socket);
		} else {
			ret = vif_delete(vif.vifc_vifi, 0);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(&mfc);
		else
			ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mroute_do_assert = (v) ? 1 : 0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = (v) ? 1 : 0;

		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
			if (mroute_do_pim)
				ret = inet_add_protocol(&pim_protocol,
							IPPROTO_PIM);
			else
				ret = inet_del_protocol(&pim_protocol,
							IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
#endif
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
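/*
 * Userspace sketch (illustrative only): becoming the multicast router.
 * MRT_INIT only succeeds on a raw IGMP socket, matching the checks
 * above; error handling omitted.
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	...
 *	setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);	// shut down
 */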
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
#endif
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mroute_do_pim;
#endif
	else
		val = mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
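/*
 * Userspace sketch (illustrative only): querying the per-(S,G) counters
 * served by the SIOCGETSGCNT branch above. Addresses are example values.
 *
 *	struct sioc_sg_req sr;
 *	memset(&sr, 0, sizeof(sr));
 *	sr.src.s_addr = inet_addr("192.0.2.10");
 *	sr.grp.s_addr = inet_addr("239.1.1.1");
 *	if (ioctl(fd, SIOCGETSGCNT, &sr) == 0)
 *		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */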
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct vif_device *v;
	int ct;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v = &vif_table[0];
	for (ct=0; ct<maxvif; ct++,v++) {
		if (v->dev == dev)
			vif_delete(ct, 1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}
/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&init_net, &rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&init_net, &rt, &fl))
			goto out_free;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow sending ICMP here, so packets will simply
		   vanish into a blackhole.
		 */
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if the mrouter runs a multicasting program, it
	 * should receive packets regardless of which interface the program
	 * has joined on. If we did not do this, the program would have to
	 * join on all interfaces. On the other hand, a multihomed host (or
	 * router, but not mrouter) cannot join on more than one interface -
	 * that would result in receiving multiple copies of each packet.
	 */
	NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}
static int ipmr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct=maxvif-1; ct>=0; ct--) {
		if (vif_table[ct].dev == dev)
			return ct;
	}
	return -1;
}
/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (skb->rtable->fl.iif == 0) {
			/* It is our own packet, looped back.
			   Very complicated situation...

			   The best workaround until the routing daemons are
			   fixed is not to redistribute a packet if it was
			   sent through the wrong interface. It means that
			   multicast applications WILL NOT work for (S,G)
			   entries whose default multicast route points to
			   the wrong oif. In any case, it is not a good idea
			   to run multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts when switching from RPT to SPT,
		       so we cannot check that a packet arrived on an oif.
		       It is bad, but otherwise we would need to move a
		       pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	vif_table[vif].pkt_in++;
	vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ipmr_queue_xmit(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				ipmr_queue_xmit(skb2, cache, psend);
		} else {
			ipmr_queue_xmit(skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}
/*
 *	Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	int local = skb->rtable->rt_flags&RTCF_LOCAL;

	/* Packet is looped back after forward, it should not be
	   forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags&IPSKB_FORWARDED)
		goto dont_forward;

	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations, such as
			   Cisco IOS <= 11.2(8)) do not put the router alert
			   option into IGMP packets destined to routable
			   groups. This is very bad, because it means
			   that we can forward NO IGMP messages.
			 */
			read_lock(&mrt_lock);
			if (mroute_socket) {
				nf_reset(skb);
				raw_rcv(mroute_socket, skb);
				read_unlock(&mrt_lock);
				return 0;
			}
			read_unlock(&mrt_lock);
		}
	}

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL) {
				read_unlock(&mrt_lock);
				return -ENOBUFS;
			}
			skb = skb2;
		}

		vif = ipmr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ipmr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip_mr_forward(skb, cache, local);

	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM
static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/*
	   Check that:
	   a. packet is really destined to a multicast group
	   b. packet is not a NULL-REGISTER
	   c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	netif_rx(skb);
	dev_put(reg_dev);

	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	if (!mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
	    (pim->flags&PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	if (__pim_rcv(skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
static int
ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif_table[c->mfc_parent].dev;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc_cache *cache;
	struct rtable *rt = skb->rtable;

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);

	if (cache == NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		iph->version = 0;
		err = ipmr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);
		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = ipmr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
 */
struct ipmr_vif_iter {
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif_table[iter->ct];
	}
	return NULL;
}
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		return &vif_table[iter->ct];
	}
	return NULL;
}
static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_vif_seq_ops,
			sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
struct ipmr_mfc_iter {
	struct mfc_cache **cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc_cache *mfc;

	it->cache = mfc_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
		for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc_cache_array);

	while (++it->ct < MFC_LINES) {
		mfc = mfc_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc_cache_array)
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%08lX %08lX %-3hd",
			   (unsigned long) mfc->mfc_mcastgrp,
			   (unsigned long) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(n)
				    && mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
					   " %2d:%-3d",
					   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_mfc_seq_ops,
			sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
#endif
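/*
 * Quick inspection (userspace sketch, illustrative only): both tables are
 * exported as plain text under /proc/net, e.g.
 *
 *	FILE *f = fopen("/proc/net/ip_mr_cache", "r");
 *	char line[256];
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 */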
#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
};
#endif
/*
 *	Setup for IP multicast routing
 */

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(&init_net, "ip_mr_vif");
proc_vif_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	del_timer(&ipmr_expire_timer);
	kmem_cache_destroy(mrt_cachep);
	return err;
}