/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@redhat.com>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Version: $Id: ipmr.c,v 1.55 2000/11/28 13:13:27 davem Exp $
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */
31 #include <linux/config.h>
32 #include <asm/system.h>
33 #include <asm/uaccess.h>
34 #include <linux/types.h>
35 #include <linux/sched.h>
36 #include <linux/errno.h>
37 #include <linux/timer.h>
39 #include <linux/kernel.h>
40 #include <linux/fcntl.h>
41 #include <linux/stat.h>
42 #include <linux/socket.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/inetdevice.h>
47 #include <linux/igmp.h>
48 #include <linux/proc_fs.h>
49 #include <linux/mroute.h>
50 #include <linux/init.h>
52 #include <net/protocol.h>
53 #include <linux/skbuff.h>
58 #include <linux/notifier.h>
59 #include <linux/if_arp.h>
60 #include <linux/netfilter_ipv4.h>
62 #include <net/checksum.h>
64 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
65 #define CONFIG_IP_PIMSM 1
68 static struct sock
*mroute_socket
;
71 /* Big lock, protecting vif table, mrt cache and mroute socket state.
72 Note that the changes are semaphored via rtnl_lock.
75 static rwlock_t mrt_lock
= RW_LOCK_UNLOCKED
;
78 * Multicast router control variables
81 static struct vif_device vif_table
[MAXVIFS
]; /* Devices */
84 #define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
86 int mroute_do_assert
= 0; /* Set in PIM assert */
87 int mroute_do_pim
= 0;
89 static struct mfc_cache
*mfc_cache_array
[MFC_LINES
]; /* Forwarding cache */
91 static struct mfc_cache
*mfc_unres_queue
; /* Queue of unresolved entries */
92 atomic_t cache_resolve_queue_len
; /* Size of unresolved */
94 /* Special spinlock for queue of unresolved entries */
95 static spinlock_t mfc_unres_lock
= SPIN_LOCK_UNLOCKED
;
97 /* We return to original Alan's scheme. Hash table of resolved
98 entries is changed only in process context and protected
99 with weak lock mrt_lock. Queue of unresolved entries is protected
100 with strong spinlock mfc_unres_lock.
102 In this case data path is free of exclusive locks at all.
105 kmem_cache_t
*mrt_cachep
;
107 static int ip_mr_forward(struct sk_buff
*skb
, struct mfc_cache
*cache
, int local
);
108 static int ipmr_cache_report(struct sk_buff
*pkt
, vifi_t vifi
, int assert);
109 static int ipmr_fill_mroute(struct sk_buff
*skb
, struct mfc_cache
*c
, struct rtmsg
*rtm
);
111 extern struct inet_protocol pim_protocol
;
113 static struct timer_list ipmr_expire_timer
;
115 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
118 struct net_device
*ipmr_new_tunnel(struct vifctl
*v
)
120 struct net_device
*dev
;
122 dev
= __dev_get_by_name("tunl0");
128 struct ip_tunnel_parm p
;
129 struct in_device
*in_dev
;
131 memset(&p
, 0, sizeof(p
));
132 p
.iph
.daddr
= v
->vifc_rmt_addr
.s_addr
;
133 p
.iph
.saddr
= v
->vifc_lcl_addr
.s_addr
;
136 p
.iph
.protocol
= IPPROTO_IPIP
;
137 sprintf(p
.name
, "dvmrp%d", v
->vifc_vifi
);
138 ifr
.ifr_ifru
.ifru_data
= (void*)&p
;
140 oldfs
= get_fs(); set_fs(KERNEL_DS
);
141 err
= dev
->do_ioctl(dev
, &ifr
, SIOCADDTUNNEL
);
146 if (err
== 0 && (dev
= __dev_get_by_name(p
.name
)) != NULL
) {
147 dev
->flags
|= IFF_MULTICAST
;
149 in_dev
= __in_dev_get(dev
);
150 if (in_dev
== NULL
&& (in_dev
= inetdev_init(dev
)) == NULL
)
152 in_dev
->cnf
.rp_filter
= 0;
161 unregister_netdevice(dev
);
165 #ifdef CONFIG_IP_PIMSM
167 static int reg_vif_num
= -1;
169 static int reg_vif_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
171 read_lock(&mrt_lock
);
172 ((struct net_device_stats
*)dev
->priv
)->tx_bytes
+= skb
->len
;
173 ((struct net_device_stats
*)dev
->priv
)->tx_packets
++;
174 ipmr_cache_report(skb
, reg_vif_num
, IGMPMSG_WHOLEPKT
);
175 read_unlock(&mrt_lock
);
180 static struct net_device_stats
*reg_vif_get_stats(struct net_device
*dev
)
182 return (struct net_device_stats
*)dev
->priv
;
186 struct net_device
*ipmr_reg_vif(struct vifctl
*v
)
188 struct net_device
*dev
;
189 struct in_device
*in_dev
;
192 size
= sizeof(*dev
) + sizeof(struct net_device_stats
);
193 dev
= kmalloc(size
, GFP_KERNEL
);
197 memset(dev
, 0, size
);
201 strcpy(dev
->name
, "pimreg");
203 dev
->type
= ARPHRD_PIMREG
;
204 dev
->mtu
= 1500 - sizeof(struct iphdr
) - 8;
205 dev
->flags
= IFF_NOARP
;
206 dev
->hard_start_xmit
= reg_vif_xmit
;
207 dev
->get_stats
= reg_vif_get_stats
;
208 dev
->features
|= NETIF_F_DYNALLOC
;
210 if (register_netdevice(dev
)) {
216 if ((in_dev
= inetdev_init(dev
)) == NULL
)
219 in_dev
->cnf
.rp_filter
= 0;
227 unregister_netdevice(dev
);
236 static int vif_delete(int vifi
)
238 struct vif_device
*v
;
239 struct net_device
*dev
;
240 struct in_device
*in_dev
;
242 if (vifi
< 0 || vifi
>= maxvif
)
243 return -EADDRNOTAVAIL
;
245 v
= &vif_table
[vifi
];
247 write_lock_bh(&mrt_lock
);
252 write_unlock_bh(&mrt_lock
);
253 return -EADDRNOTAVAIL
;
256 #ifdef CONFIG_IP_PIMSM
257 if (vifi
== reg_vif_num
)
261 if (vifi
+1 == maxvif
) {
263 for (tmp
=vifi
-1; tmp
>=0; tmp
--) {
270 write_unlock_bh(&mrt_lock
);
272 dev_set_allmulti(dev
, -1);
274 if ((in_dev
= __in_dev_get(dev
)) != NULL
) {
275 in_dev
->cnf
.mc_forwarding
--;
276 ip_rt_multicast_event(in_dev
);
279 if (v
->flags
&(VIFF_TUNNEL
|VIFF_REGISTER
))
280 unregister_netdevice(dev
);
286 /* Destroy an unresolved cache entry, killing queued skbs
287 and reporting error to netlink readers.
290 static void ipmr_destroy_unres(struct mfc_cache
*c
)
294 atomic_dec(&cache_resolve_queue_len
);
296 while((skb
=skb_dequeue(&c
->mfc_un
.unres
.unresolved
))) {
297 #ifdef CONFIG_RTNETLINK
298 if (skb
->nh
.iph
->version
== 0) {
299 struct nlmsghdr
*nlh
= (struct nlmsghdr
*)skb_pull(skb
, sizeof(struct iphdr
));
300 nlh
->nlmsg_type
= NLMSG_ERROR
;
301 nlh
->nlmsg_len
= NLMSG_LENGTH(sizeof(struct nlmsgerr
));
302 skb_trim(skb
, nlh
->nlmsg_len
);
303 ((struct nlmsgerr
*)NLMSG_DATA(nlh
))->error
= -ETIMEDOUT
;
304 netlink_unicast(rtnl
, skb
, NETLINK_CB(skb
).dst_pid
, MSG_DONTWAIT
);
310 kmem_cache_free(mrt_cachep
, c
);
314 /* Single timer process for all the unresolved queue. */
316 void ipmr_expire_process(unsigned long dummy
)
319 unsigned long expires
;
320 struct mfc_cache
*c
, **cp
;
322 if (!spin_trylock(&mfc_unres_lock
)) {
323 mod_timer(&ipmr_expire_timer
, jiffies
+HZ
/10);
327 if (atomic_read(&cache_resolve_queue_len
) == 0)
332 cp
= &mfc_unres_queue
;
334 while ((c
=*cp
) != NULL
) {
335 long interval
= c
->mfc_un
.unres
.expires
- now
;
338 if (interval
< expires
)
346 ipmr_destroy_unres(c
);
349 if (atomic_read(&cache_resolve_queue_len
))
350 mod_timer(&ipmr_expire_timer
, jiffies
+ expires
);
353 spin_unlock(&mfc_unres_lock
);
356 /* Fill oifs list. It is called under write locked mrt_lock. */
358 static void ipmr_update_threshoulds(struct mfc_cache
*cache
, unsigned char *ttls
)
362 cache
->mfc_un
.res
.minvif
= MAXVIFS
;
363 cache
->mfc_un
.res
.maxvif
= 0;
364 memset(cache
->mfc_un
.res
.ttls
, 255, MAXVIFS
);
366 for (vifi
=0; vifi
<maxvif
; vifi
++) {
367 if (VIF_EXISTS(vifi
) && ttls
[vifi
] && ttls
[vifi
] < 255) {
368 cache
->mfc_un
.res
.ttls
[vifi
] = ttls
[vifi
];
369 if (cache
->mfc_un
.res
.minvif
> vifi
)
370 cache
->mfc_un
.res
.minvif
= vifi
;
371 if (cache
->mfc_un
.res
.maxvif
<= vifi
)
372 cache
->mfc_un
.res
.maxvif
= vifi
+ 1;
377 static int vif_add(struct vifctl
*vifc
, int mrtsock
)
379 int vifi
= vifc
->vifc_vifi
;
380 struct vif_device
*v
= &vif_table
[vifi
];
381 struct net_device
*dev
;
382 struct in_device
*in_dev
;
385 if (VIF_EXISTS(vifi
))
388 switch (vifc
->vifc_flags
) {
389 #ifdef CONFIG_IP_PIMSM
392 * Special Purpose VIF in PIM
393 * All the packets will be sent to the daemon
395 if (reg_vif_num
>= 0)
397 dev
= ipmr_reg_vif(vifc
);
403 dev
= ipmr_new_tunnel(vifc
);
408 dev
=ip_dev_find(vifc
->vifc_lcl_addr
.s_addr
);
410 return -EADDRNOTAVAIL
;
417 if ((in_dev
= __in_dev_get(dev
)) == NULL
)
418 return -EADDRNOTAVAIL
;
419 in_dev
->cnf
.mc_forwarding
++;
420 dev_set_allmulti(dev
, +1);
421 ip_rt_multicast_event(in_dev
);
424 * Fill in the VIF structures
426 v
->rate_limit
=vifc
->vifc_rate_limit
;
427 v
->local
=vifc
->vifc_lcl_addr
.s_addr
;
428 v
->remote
=vifc
->vifc_rmt_addr
.s_addr
;
429 v
->flags
=vifc
->vifc_flags
;
431 v
->flags
|= VIFF_STATIC
;
432 v
->threshold
=vifc
->vifc_threshold
;
437 v
->link
= dev
->ifindex
;
438 if (v
->flags
&(VIFF_TUNNEL
|VIFF_REGISTER
))
439 v
->link
= dev
->iflink
;
441 /* And finish update writing critical data */
442 write_lock_bh(&mrt_lock
);
445 #ifdef CONFIG_IP_PIMSM
446 if (v
->flags
&VIFF_REGISTER
)
451 write_unlock_bh(&mrt_lock
);
455 static struct mfc_cache
*ipmr_cache_find(__u32 origin
, __u32 mcastgrp
)
457 int line
=MFC_HASH(mcastgrp
,origin
);
460 for (c
=mfc_cache_array
[line
]; c
; c
= c
->next
) {
461 if (c
->mfc_origin
==origin
&& c
->mfc_mcastgrp
==mcastgrp
)
468 * Allocate a multicast cache entry
470 static struct mfc_cache
*ipmr_cache_alloc(void)
472 struct mfc_cache
*c
=kmem_cache_alloc(mrt_cachep
, GFP_KERNEL
);
475 memset(c
, 0, sizeof(*c
));
476 c
->mfc_un
.res
.minvif
= MAXVIFS
;
480 static struct mfc_cache
*ipmr_cache_alloc_unres(void)
482 struct mfc_cache
*c
=kmem_cache_alloc(mrt_cachep
, GFP_ATOMIC
);
485 memset(c
, 0, sizeof(*c
));
486 skb_queue_head_init(&c
->mfc_un
.unres
.unresolved
);
487 c
->mfc_un
.unres
.expires
= jiffies
+ 10*HZ
;
492 * A cache entry has gone into a resolved state from queued
495 static void ipmr_cache_resolve(struct mfc_cache
*uc
, struct mfc_cache
*c
)
500 * Play the pending entries through our router
503 while((skb
=__skb_dequeue(&uc
->mfc_un
.unres
.unresolved
))) {
504 #ifdef CONFIG_RTNETLINK
505 if (skb
->nh
.iph
->version
== 0) {
507 struct nlmsghdr
*nlh
= (struct nlmsghdr
*)skb_pull(skb
, sizeof(struct iphdr
));
509 if (ipmr_fill_mroute(skb
, c
, NLMSG_DATA(nlh
)) > 0) {
510 nlh
->nlmsg_len
= skb
->tail
- (u8
*)nlh
;
512 nlh
->nlmsg_type
= NLMSG_ERROR
;
513 nlh
->nlmsg_len
= NLMSG_LENGTH(sizeof(struct nlmsgerr
));
514 skb_trim(skb
, nlh
->nlmsg_len
);
515 ((struct nlmsgerr
*)NLMSG_DATA(nlh
))->error
= -EMSGSIZE
;
517 err
= netlink_unicast(rtnl
, skb
, NETLINK_CB(skb
).dst_pid
, MSG_DONTWAIT
);
520 ip_mr_forward(skb
, c
, 0);
525 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
526 * expects the following bizarre scheme.
528 * Called under mrt_lock.
531 static int ipmr_cache_report(struct sk_buff
*pkt
, vifi_t vifi
, int assert)
534 int ihl
= pkt
->nh
.iph
->ihl
<<2;
535 struct igmphdr
*igmp
;
539 #ifdef CONFIG_IP_PIMSM
540 if (assert == IGMPMSG_WHOLEPKT
)
541 skb
= skb_realloc_headroom(pkt
, sizeof(struct iphdr
));
544 skb
= alloc_skb(128, GFP_ATOMIC
);
549 #ifdef CONFIG_IP_PIMSM
550 if (assert == IGMPMSG_WHOLEPKT
) {
551 /* Ugly, but we have no choice with this interface.
552 Duplicate old header, fix ihl, length etc.
553 And all this only to mangle msg->im_msgtype and
554 to set msg->im_mbz to "mbz" :-)
556 msg
= (struct igmpmsg
*)skb_push(skb
, sizeof(struct iphdr
));
557 skb
->nh
.raw
= skb
->h
.raw
= (u8
*)msg
;
558 memcpy(msg
, pkt
->nh
.raw
, sizeof(struct iphdr
));
559 msg
->im_msgtype
= IGMPMSG_WHOLEPKT
;
561 msg
->im_vif
= reg_vif_num
;
562 skb
->nh
.iph
->ihl
= sizeof(struct iphdr
) >> 2;
563 skb
->nh
.iph
->tot_len
= htons(ntohs(pkt
->nh
.iph
->tot_len
) + sizeof(struct iphdr
));
572 skb
->nh
.iph
= (struct iphdr
*)skb_put(skb
, ihl
);
573 memcpy(skb
->data
,pkt
->data
,ihl
);
574 skb
->nh
.iph
->protocol
= 0; /* Flag to the kernel this is a route add */
575 msg
= (struct igmpmsg
*)skb
->nh
.iph
;
577 skb
->dst
= dst_clone(pkt
->dst
);
583 igmp
=(struct igmphdr
*)skb_put(skb
,sizeof(struct igmphdr
));
585 msg
->im_msgtype
= assert;
587 skb
->nh
.iph
->tot_len
=htons(skb
->len
); /* Fix the length */
588 skb
->h
.raw
= skb
->nh
.raw
;
591 if (mroute_socket
== NULL
) {
599 if ((ret
=sock_queue_rcv_skb(mroute_socket
,skb
))<0) {
601 printk(KERN_WARNING
"mroute: pending queue full, dropping entries.\n");
609 * Queue a packet for resolution. It gets locked cache entry!
613 ipmr_cache_unresolved(vifi_t vifi
, struct sk_buff
*skb
)
618 spin_lock_bh(&mfc_unres_lock
);
619 for (c
=mfc_unres_queue
; c
; c
=c
->next
) {
620 if (c
->mfc_mcastgrp
== skb
->nh
.iph
->daddr
&&
621 c
->mfc_origin
== skb
->nh
.iph
->saddr
)
627 * Create a new entry if allowable
630 if (atomic_read(&cache_resolve_queue_len
)>=10 ||
631 (c
=ipmr_cache_alloc_unres())==NULL
) {
632 spin_unlock_bh(&mfc_unres_lock
);
639 * Fill in the new cache entry
642 c
->mfc_origin
=skb
->nh
.iph
->saddr
;
643 c
->mfc_mcastgrp
=skb
->nh
.iph
->daddr
;
646 * Reflect first query at mrouted.
648 if ((err
= ipmr_cache_report(skb
, vifi
, IGMPMSG_NOCACHE
))<0) {
649 /* If the report failed throw the cache entry
652 spin_unlock_bh(&mfc_unres_lock
);
654 kmem_cache_free(mrt_cachep
, c
);
659 atomic_inc(&cache_resolve_queue_len
);
660 c
->next
= mfc_unres_queue
;
663 mod_timer(&ipmr_expire_timer
, c
->mfc_un
.unres
.expires
);
667 * See if we can append the packet
669 if (c
->mfc_un
.unres
.unresolved
.qlen
>3) {
673 skb_queue_tail(&c
->mfc_un
.unres
.unresolved
,skb
);
677 spin_unlock_bh(&mfc_unres_lock
);
682 * MFC cache manipulation by user space mroute daemon
685 int ipmr_mfc_delete(struct mfcctl
*mfc
)
688 struct mfc_cache
*c
, **cp
;
690 line
=MFC_HASH(mfc
->mfcc_mcastgrp
.s_addr
, mfc
->mfcc_origin
.s_addr
);
692 for (cp
=&mfc_cache_array
[line
]; (c
=*cp
) != NULL
; cp
= &c
->next
) {
693 if (c
->mfc_origin
== mfc
->mfcc_origin
.s_addr
&&
694 c
->mfc_mcastgrp
== mfc
->mfcc_mcastgrp
.s_addr
) {
695 write_lock_bh(&mrt_lock
);
697 write_unlock_bh(&mrt_lock
);
699 kmem_cache_free(mrt_cachep
, c
);
706 int ipmr_mfc_add(struct mfcctl
*mfc
, int mrtsock
)
709 struct mfc_cache
*uc
, *c
, **cp
;
711 line
=MFC_HASH(mfc
->mfcc_mcastgrp
.s_addr
, mfc
->mfcc_origin
.s_addr
);
713 for (cp
=&mfc_cache_array
[line
]; (c
=*cp
) != NULL
; cp
= &c
->next
) {
714 if (c
->mfc_origin
== mfc
->mfcc_origin
.s_addr
&&
715 c
->mfc_mcastgrp
== mfc
->mfcc_mcastgrp
.s_addr
)
720 write_lock_bh(&mrt_lock
);
721 c
->mfc_parent
= mfc
->mfcc_parent
;
722 ipmr_update_threshoulds(c
, mfc
->mfcc_ttls
);
724 c
->mfc_flags
|= MFC_STATIC
;
725 write_unlock_bh(&mrt_lock
);
729 if(!MULTICAST(mfc
->mfcc_mcastgrp
.s_addr
))
732 c
=ipmr_cache_alloc();
736 c
->mfc_origin
=mfc
->mfcc_origin
.s_addr
;
737 c
->mfc_mcastgrp
=mfc
->mfcc_mcastgrp
.s_addr
;
738 c
->mfc_parent
=mfc
->mfcc_parent
;
739 ipmr_update_threshoulds(c
, mfc
->mfcc_ttls
);
741 c
->mfc_flags
|= MFC_STATIC
;
743 write_lock_bh(&mrt_lock
);
744 c
->next
= mfc_cache_array
[line
];
745 mfc_cache_array
[line
] = c
;
746 write_unlock_bh(&mrt_lock
);
749 * Check to see if we resolved a queued list. If so we
750 * need to send on the frames and tidy up.
752 spin_lock_bh(&mfc_unres_lock
);
753 for (cp
= &mfc_unres_queue
; (uc
=*cp
) != NULL
;
755 if (uc
->mfc_origin
== c
->mfc_origin
&&
756 uc
->mfc_mcastgrp
== c
->mfc_mcastgrp
) {
758 if (atomic_dec_and_test(&cache_resolve_queue_len
))
759 del_timer(&ipmr_expire_timer
);
763 spin_unlock_bh(&mfc_unres_lock
);
766 ipmr_cache_resolve(uc
, c
);
767 kmem_cache_free(mrt_cachep
, uc
);
773 * Close the multicast socket, and clear the vif tables etc
776 static void mroute_clean_tables(struct sock
*sk
)
781 * Shut down all active vif entries
783 for(i
=0; i
<maxvif
; i
++) {
784 if (!(vif_table
[i
].flags
&VIFF_STATIC
))
791 for (i
=0;i
<MFC_LINES
;i
++) {
792 struct mfc_cache
*c
, **cp
;
794 cp
= &mfc_cache_array
[i
];
795 while ((c
= *cp
) != NULL
) {
796 if (c
->mfc_flags
&MFC_STATIC
) {
800 write_lock_bh(&mrt_lock
);
802 write_unlock_bh(&mrt_lock
);
804 kmem_cache_free(mrt_cachep
, c
);
808 if (atomic_read(&cache_resolve_queue_len
) != 0) {
811 spin_lock_bh(&mfc_unres_lock
);
812 while (mfc_unres_queue
!= NULL
) {
814 mfc_unres_queue
= c
->next
;
815 spin_unlock_bh(&mfc_unres_lock
);
817 ipmr_destroy_unres(c
);
819 spin_lock_bh(&mfc_unres_lock
);
821 spin_unlock_bh(&mfc_unres_lock
);
825 static void mrtsock_destruct(struct sock
*sk
)
828 if (sk
== mroute_socket
) {
829 ipv4_devconf
.mc_forwarding
--;
831 write_lock_bh(&mrt_lock
);
833 write_unlock_bh(&mrt_lock
);
835 mroute_clean_tables(sk
);
841 * Socket options and virtual interface manipulation. The whole
842 * virtual interface system is a complete heap, but unfortunately
843 * that's how BSD mrouted happens to think. Maybe one day with a proper
844 * MOSPF/PIM router set up we can clean this up.
847 int ip_mroute_setsockopt(struct sock
*sk
,int optname
,char *optval
,int optlen
)
853 if(optname
!=MRT_INIT
)
855 if(sk
!=mroute_socket
&& !capable(CAP_NET_ADMIN
))
862 if(sk
->type
!=SOCK_RAW
|| sk
->num
!=IPPROTO_IGMP
)
864 if(optlen
!=sizeof(int))
873 ret
= ip_ra_control(sk
, 1, mrtsock_destruct
);
875 write_lock_bh(&mrt_lock
);
877 write_unlock_bh(&mrt_lock
);
879 ipv4_devconf
.mc_forwarding
++;
884 if (sk
!=mroute_socket
)
886 return ip_ra_control(sk
, 0, NULL
);
889 if(optlen
!=sizeof(vif
))
891 if (copy_from_user(&vif
,optval
,sizeof(vif
)))
893 if(vif
.vifc_vifi
>= MAXVIFS
)
896 if (optname
==MRT_ADD_VIF
) {
897 ret
= vif_add(&vif
, sk
==mroute_socket
);
899 ret
= vif_delete(vif
.vifc_vifi
);
905 * Manipulate the forwarding caches. These live
906 * in a sort of kernel/user symbiosis.
910 if(optlen
!=sizeof(mfc
))
912 if (copy_from_user(&mfc
,optval
, sizeof(mfc
)))
915 if (optname
==MRT_DEL_MFC
)
916 ret
= ipmr_mfc_delete(&mfc
);
918 ret
= ipmr_mfc_add(&mfc
, sk
==mroute_socket
);
922 * Control PIM assert.
927 if(get_user(v
,(int *)optval
))
929 mroute_do_assert
=(v
)?1:0;
932 #ifdef CONFIG_IP_PIMSM
936 if(get_user(v
,(int *)optval
))
940 if (v
!= mroute_do_pim
) {
942 mroute_do_assert
= v
;
943 #ifdef CONFIG_IP_PIMSM_V2
945 inet_add_protocol(&pim_protocol
);
947 inet_del_protocol(&pim_protocol
);
955 * Spurious command, or MRT_VERSION which you cannot
964 * Getsock opt support for the multicast routing system.
967 int ip_mroute_getsockopt(struct sock
*sk
,int optname
,char *optval
,int *optlen
)
972 if(optname
!=MRT_VERSION
&&
973 #ifdef CONFIG_IP_PIMSM
979 if(get_user(olr
, optlen
))
982 olr
=min(olr
,sizeof(int));
983 if(put_user(olr
,optlen
))
985 if(optname
==MRT_VERSION
)
987 #ifdef CONFIG_IP_PIMSM
988 else if(optname
==MRT_PIM
)
992 val
=mroute_do_assert
;
993 if(copy_to_user(optval
,&val
,olr
))
999 * The IP multicast ioctl support routines.
1002 int ipmr_ioctl(struct sock
*sk
, int cmd
, unsigned long arg
)
1004 struct sioc_sg_req sr
;
1005 struct sioc_vif_req vr
;
1006 struct vif_device
*vif
;
1007 struct mfc_cache
*c
;
1012 if (copy_from_user(&vr
,(void *)arg
,sizeof(vr
)))
1016 read_lock(&mrt_lock
);
1017 vif
=&vif_table
[vr
.vifi
];
1018 if(VIF_EXISTS(vr
.vifi
)) {
1019 vr
.icount
=vif
->pkt_in
;
1020 vr
.ocount
=vif
->pkt_out
;
1021 vr
.ibytes
=vif
->bytes_in
;
1022 vr
.obytes
=vif
->bytes_out
;
1023 read_unlock(&mrt_lock
);
1025 if (copy_to_user((void *)arg
,&vr
,sizeof(vr
)))
1029 read_unlock(&mrt_lock
);
1030 return -EADDRNOTAVAIL
;
1032 if (copy_from_user(&sr
,(void *)arg
,sizeof(sr
)))
1035 read_lock(&mrt_lock
);
1036 c
= ipmr_cache_find(sr
.src
.s_addr
, sr
.grp
.s_addr
);
1038 sr
.pktcnt
= c
->mfc_un
.res
.pkt
;
1039 sr
.bytecnt
= c
->mfc_un
.res
.bytes
;
1040 sr
.wrong_if
= c
->mfc_un
.res
.wrong_if
;
1041 read_unlock(&mrt_lock
);
1043 if (copy_to_user((void *)arg
,&sr
,sizeof(sr
)))
1047 read_unlock(&mrt_lock
);
1048 return -EADDRNOTAVAIL
;
1050 return -ENOIOCTLCMD
;
1055 static int ipmr_device_event(struct notifier_block
*this, unsigned long event
, void *ptr
)
1057 struct vif_device
*v
;
1059 if (event
!= NETDEV_UNREGISTER
)
1062 for(ct
=0;ct
<maxvif
;ct
++,v
++) {
1070 static struct notifier_block ip_mr_notifier
={
1077 * Encapsulate a packet by attaching a valid IPIP header to it.
1078 * This avoids tunnel drivers and other mess and gives us the speed so
1079 * important for multicast video.
1082 static void ip_encap(struct sk_buff
*skb
, u32 saddr
, u32 daddr
)
1084 struct iphdr
*iph
= (struct iphdr
*)skb_push(skb
,sizeof(struct iphdr
));
1087 iph
->tos
= skb
->nh
.iph
->tos
;
1088 iph
->ttl
= skb
->nh
.iph
->ttl
;
1092 iph
->protocol
= IPPROTO_IPIP
;
1094 iph
->tot_len
= htons(skb
->len
);
1095 ip_select_ident(iph
, skb
->dst
);
1098 skb
->h
.ipiph
= skb
->nh
.iph
;
1100 #ifdef CONFIG_NETFILTER
1101 nf_conntrack_put(skb
->nfct
);
1106 static inline int ipmr_forward_finish(struct sk_buff
*skb
)
1108 struct dst_entry
*dst
= skb
->dst
;
1110 if (skb
->len
<= dst
->pmtu
)
1111 return dst
->output(skb
);
1113 return ip_fragment(skb
, dst
->output
);
1117 * Processing handlers for ipmr_forward
1120 static void ipmr_queue_xmit(struct sk_buff
*skb
, struct mfc_cache
*c
,
1123 struct iphdr
*iph
= skb
->nh
.iph
;
1124 struct vif_device
*vif
= &vif_table
[vifi
];
1125 struct net_device
*dev
;
1128 struct sk_buff
*skb2
;
1130 if (vif
->dev
== NULL
)
1133 #ifdef CONFIG_IP_PIMSM
1134 if (vif
->flags
& VIFF_REGISTER
) {
1136 vif
->bytes_out
+=skb
->len
;
1137 ((struct net_device_stats
*)vif
->dev
->priv
)->tx_bytes
+= skb
->len
;
1138 ((struct net_device_stats
*)vif
->dev
->priv
)->tx_packets
++;
1139 ipmr_cache_report(skb
, vifi
, IGMPMSG_WHOLEPKT
);
1144 if (vif
->flags
&VIFF_TUNNEL
) {
1145 if (ip_route_output(&rt
, vif
->remote
, vif
->local
, RT_TOS(iph
->tos
), vif
->link
))
1147 encap
= sizeof(struct iphdr
);
1149 if (ip_route_output(&rt
, iph
->daddr
, 0, RT_TOS(iph
->tos
), vif
->link
))
1153 dev
= rt
->u
.dst
.dev
;
1155 if (skb
->len
+encap
> rt
->u
.dst
.pmtu
&& (ntohs(iph
->frag_off
) & IP_DF
)) {
1156 /* Do not fragment multicasts. Alas, IPv4 does not
1157 allow to send ICMP, so that packets will disappear
1161 IP_INC_STATS_BH(IpFragFails
);
1166 encap
+= dev
->hard_header_len
;
1168 if (skb_headroom(skb
) < encap
|| skb_cloned(skb
) || !last
)
1169 skb2
= skb_realloc_headroom(skb
, (encap
+ 15)&~15);
1170 else if (atomic_read(&skb
->users
) != 1)
1171 skb2
= skb_clone(skb
, GFP_ATOMIC
);
1173 atomic_inc(&skb
->users
);
1183 vif
->bytes_out
+=skb
->len
;
1185 dst_release(skb2
->dst
);
1186 skb2
->dst
= &rt
->u
.dst
;
1188 ip_decrease_ttl(iph
);
1190 /* FIXME: forward and output firewalls used to be called here.
1191 * What do we do with netfilter? -- RR */
1192 if (vif
->flags
& VIFF_TUNNEL
) {
1193 ip_encap(skb2
, vif
->local
, vif
->remote
);
1194 /* FIXME: extra output firewall step used to be here. --RR */
1195 ((struct ip_tunnel
*)vif
->dev
->priv
)->stat
.tx_packets
++;
1196 ((struct ip_tunnel
*)vif
->dev
->priv
)->stat
.tx_bytes
+=skb2
->len
;
1199 IPCB(skb2
)->flags
|= IPSKB_FORWARDED
;
1202 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1203 * not only before forwarding, but after forwarding on all output
1204 * interfaces. It is clear, if mrouter runs a multicasting
1205 * program, it should receive packets not depending to what interface
1206 * program is joined.
1207 * If we will not make it, the program will have to join on all
1208 * interfaces. On the other hand, multihoming host (or router, but
1209 * not mrouter) cannot join to more than one interface - it will
1210 * result in receiving multiple packets.
1212 NF_HOOK(PF_INET
, NF_IP_FORWARD
, skb2
, skb
->dev
, dev
,
1213 ipmr_forward_finish
);
1216 int ipmr_find_vif(struct net_device
*dev
)
1219 for (ct
=maxvif
-1; ct
>=0; ct
--) {
1220 if (vif_table
[ct
].dev
== dev
)
1226 /* "local" means that we should preserve one skb (for local delivery) */
1228 int ip_mr_forward(struct sk_buff
*skb
, struct mfc_cache
*cache
, int local
)
1233 vif
= cache
->mfc_parent
;
1234 cache
->mfc_un
.res
.pkt
++;
1235 cache
->mfc_un
.res
.bytes
+= skb
->len
;
1238 * Wrong interface: drop packet and (maybe) send PIM assert.
1240 if (vif_table
[vif
].dev
!= skb
->dev
) {
1243 if (((struct rtable
*)skb
->dst
)->key
.iif
== 0) {
1244 /* It is our own packet, looped back.
1245 Very complicated situation...
1247 The best workaround until routing daemons will be
1248 fixed is not to redistribute packet, if it was
1249 send through wrong interface. It means, that
1250 multicast applications WILL NOT work for
1251 (S,G), which have default multicast route pointing
1252 to wrong oif. In any case, it is not a good
1253 idea to use multicasting applications on router.
1258 cache
->mfc_un
.res
.wrong_if
++;
1259 true_vifi
= ipmr_find_vif(skb
->dev
);
1261 if (true_vifi
>= 0 && mroute_do_assert
&&
1262 /* pimsm uses asserts, when switching from RPT to SPT,
1263 so that we cannot check that packet arrived on an oif.
1264 It is bad, but otherwise we would need to move pretty
1265 large chunk of pimd to kernel. Ough... --ANK
1267 (mroute_do_pim
|| cache
->mfc_un
.res
.ttls
[true_vifi
] < 255) &&
1268 jiffies
- cache
->mfc_un
.res
.last_assert
> MFC_ASSERT_THRESH
) {
1269 cache
->mfc_un
.res
.last_assert
= jiffies
;
1270 ipmr_cache_report(skb
, true_vifi
, IGMPMSG_WRONGVIF
);
1275 vif_table
[vif
].pkt_in
++;
1276 vif_table
[vif
].bytes_in
+=skb
->len
;
1281 for (ct
= cache
->mfc_un
.res
.maxvif
-1; ct
>= cache
->mfc_un
.res
.minvif
; ct
--) {
1282 if (skb
->nh
.iph
->ttl
> cache
->mfc_un
.res
.ttls
[ct
]) {
1284 ipmr_queue_xmit(skb
, cache
, psend
, 0);
1289 ipmr_queue_xmit(skb
, cache
, psend
, !local
);
1299 * Multicast packets for forwarding arrive here
1302 int ip_mr_input(struct sk_buff
*skb
)
1304 struct mfc_cache
*cache
;
1305 int local
= ((struct rtable
*)skb
->dst
)->rt_flags
&RTCF_LOCAL
;
1307 /* Packet is looped back after forward, it should not be
1308 forwarded second time, but still can be delivered locally.
1310 if (IPCB(skb
)->flags
&IPSKB_FORWARDED
)
1314 if (IPCB(skb
)->opt
.router_alert
) {
1315 if (ip_call_ra_chain(skb
))
1317 } else if (skb
->nh
.iph
->protocol
== IPPROTO_IGMP
){
1318 /* IGMPv1 (and broken IGMPv2 implementations sort of
1319 Cisco IOS <= 11.2(8)) do not put router alert
1320 option to IGMP packets destined to routable
1321 groups. It is very bad, because it means
1322 that we can forward NO IGMP messages.
1324 read_lock(&mrt_lock
);
1325 if (mroute_socket
) {
1326 raw_rcv(mroute_socket
, skb
);
1327 read_unlock(&mrt_lock
);
1330 read_unlock(&mrt_lock
);
1334 read_lock(&mrt_lock
);
1335 cache
= ipmr_cache_find(skb
->nh
.iph
->saddr
, skb
->nh
.iph
->daddr
);
1338 * No usable cache entry
1344 struct sk_buff
*skb2
= skb_clone(skb
, GFP_ATOMIC
);
1345 ip_local_deliver(skb
);
1347 read_unlock(&mrt_lock
);
1353 vif
= ipmr_find_vif(skb
->dev
);
1355 int err
= ipmr_cache_unresolved(vif
, skb
);
1356 read_unlock(&mrt_lock
);
1360 read_unlock(&mrt_lock
);
1365 ip_mr_forward(skb
, cache
, local
);
1367 read_unlock(&mrt_lock
);
1370 return ip_local_deliver(skb
);
1376 return ip_local_deliver(skb
);
1381 #ifdef CONFIG_IP_PIMSM_V1
1383 * Handle IGMP messages of PIMv1
1386 int pim_rcv_v1(struct sk_buff
* skb
, unsigned short len
)
1388 struct igmphdr
*pim
= (struct igmphdr
*)skb
->h
.raw
;
1389 struct iphdr
*encap
;
1390 struct net_device
*reg_dev
= NULL
;
1392 if (!mroute_do_pim
||
1393 len
< sizeof(*pim
) + sizeof(*encap
) ||
1394 pim
->group
!= PIM_V1_VERSION
|| pim
->code
!= PIM_V1_REGISTER
) {
1399 encap
= (struct iphdr
*)(skb
->h
.raw
+ sizeof(struct igmphdr
));
1402 a. packet is really destinted to a multicast group
1403 b. packet is not a NULL-REGISTER
1404 c. packet is not truncated
1406 if (!MULTICAST(encap
->daddr
) ||
1407 ntohs(encap
->tot_len
) == 0 ||
1408 ntohs(encap
->tot_len
) + sizeof(*pim
) > len
) {
1413 read_lock(&mrt_lock
);
1414 if (reg_vif_num
>= 0)
1415 reg_dev
= vif_table
[reg_vif_num
].dev
;
1418 read_unlock(&mrt_lock
);
1420 if (reg_dev
== NULL
) {
1425 skb
->mac
.raw
= skb
->nh
.raw
;
1426 skb_pull(skb
, (u8
*)encap
- skb
->data
);
1427 skb
->nh
.iph
= (struct iphdr
*)skb
->data
;
1429 memset(&(IPCB(skb
)->opt
), 0, sizeof(struct ip_options
));
1430 skb
->protocol
= __constant_htons(ETH_P_IP
);
1432 skb
->pkt_type
= PACKET_HOST
;
1433 dst_release(skb
->dst
);
1435 ((struct net_device_stats
*)reg_dev
->priv
)->rx_bytes
+= skb
->len
;
1436 ((struct net_device_stats
*)reg_dev
->priv
)->rx_packets
++;
1437 #ifdef CONFIG_NETFILTER
1438 nf_conntrack_put(skb
->nfct
);
1447 #ifdef CONFIG_IP_PIMSM_V2
1448 int pim_rcv(struct sk_buff
* skb
, unsigned short len
)
1450 struct pimreghdr
*pim
= (struct pimreghdr
*)skb
->h
.raw
;
1451 struct iphdr
*encap
;
1452 struct net_device
*reg_dev
= NULL
;
1454 if (len
< sizeof(*pim
) + sizeof(*encap
) ||
1455 pim
->type
!= ((PIM_VERSION
<<4)|(PIM_REGISTER
)) ||
1456 (pim
->flags
&PIM_NULL_REGISTER
) ||
1457 (ip_compute_csum((void *)pim
, sizeof(*pim
)) != 0 &&
1458 ip_compute_csum((void *)pim
, len
))) {
1463 /* check if the inner packet is destined to mcast group */
1464 encap
= (struct iphdr
*)(skb
->h
.raw
+ sizeof(struct pimreghdr
));
1465 if (!MULTICAST(encap
->daddr
) ||
1466 ntohs(encap
->tot_len
) == 0 ||
1467 ntohs(encap
->tot_len
) + sizeof(*pim
) > len
) {
1472 read_lock(&mrt_lock
);
1473 if (reg_vif_num
>= 0)
1474 reg_dev
= vif_table
[reg_vif_num
].dev
;
1477 read_unlock(&mrt_lock
);
1479 if (reg_dev
== NULL
) {
1484 skb
->mac
.raw
= skb
->nh
.raw
;
1485 skb_pull(skb
, (u8
*)encap
- skb
->data
);
1486 skb
->nh
.iph
= (struct iphdr
*)skb
->data
;
1488 memset(&(IPCB(skb
)->opt
), 0, sizeof(struct ip_options
));
1489 skb
->protocol
= __constant_htons(ETH_P_IP
);
1491 skb
->pkt_type
= PACKET_HOST
;
1492 dst_release(skb
->dst
);
1493 ((struct net_device_stats
*)reg_dev
->priv
)->rx_bytes
+= skb
->len
;
1494 ((struct net_device_stats
*)reg_dev
->priv
)->rx_packets
++;
1496 #ifdef CONFIG_NETFILTER
1497 nf_conntrack_put(skb
->nfct
);
1506 #ifdef CONFIG_RTNETLINK
1509 ipmr_fill_mroute(struct sk_buff
*skb
, struct mfc_cache
*c
, struct rtmsg
*rtm
)
1512 struct rtnexthop
*nhp
;
1513 struct net_device
*dev
= vif_table
[c
->mfc_parent
].dev
;
1515 struct rtattr
*mp_head
;
1518 RTA_PUT(skb
, RTA_IIF
, 4, &dev
->ifindex
);
1520 mp_head
= (struct rtattr
*)skb_put(skb
, RTA_LENGTH(0));
1522 for (ct
= c
->mfc_un
.res
.minvif
; ct
< c
->mfc_un
.res
.maxvif
; ct
++) {
1523 if (c
->mfc_un
.res
.ttls
[ct
] < 255) {
1524 if (skb_tailroom(skb
) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp
)) + 4))
1525 goto rtattr_failure
;
1526 nhp
= (struct rtnexthop
*)skb_put(skb
, RTA_ALIGN(sizeof(*nhp
)));
1527 nhp
->rtnh_flags
= 0;
1528 nhp
->rtnh_hops
= c
->mfc_un
.res
.ttls
[ct
];
1529 nhp
->rtnh_ifindex
= vif_table
[ct
].dev
->ifindex
;
1530 nhp
->rtnh_len
= sizeof(*nhp
);
1533 mp_head
->rta_type
= RTA_MULTIPATH
;
1534 mp_head
->rta_len
= skb
->tail
- (u8
*)mp_head
;
1535 rtm
->rtm_type
= RTN_MULTICAST
;
1539 skb_trim(skb
, b
- skb
->data
);
1543 int ipmr_get_route(struct sk_buff
*skb
, struct rtmsg
*rtm
, int nowait
)
1546 struct mfc_cache
*cache
;
1547 struct rtable
*rt
= (struct rtable
*)skb
->dst
;
1549 read_lock(&mrt_lock
);
1550 cache
= ipmr_cache_find(rt
->rt_src
, rt
->rt_dst
);
1553 struct net_device
*dev
;
1557 read_unlock(&mrt_lock
);
1562 if (dev
== NULL
|| (vif
= ipmr_find_vif(dev
)) < 0) {
1563 read_unlock(&mrt_lock
);
1566 skb
->nh
.raw
= skb_push(skb
, sizeof(struct iphdr
));
1567 skb
->nh
.iph
->ihl
= sizeof(struct iphdr
)>>2;
1568 skb
->nh
.iph
->saddr
= rt
->rt_src
;
1569 skb
->nh
.iph
->daddr
= rt
->rt_dst
;
1570 skb
->nh
.iph
->version
= 0;
1571 err
= ipmr_cache_unresolved(vif
, skb
);
1572 read_unlock(&mrt_lock
);
1576 if (!nowait
&& (rtm
->rtm_flags
&RTM_F_NOTIFY
))
1577 cache
->mfc_flags
|= MFC_NOTIFY
;
1578 err
= ipmr_fill_mroute(skb
, cache
, rtm
);
1579 read_unlock(&mrt_lock
);
1584 #ifdef CONFIG_PROC_FS
1586 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
1589 static int ipmr_vif_info(char *buffer
, char **start
, off_t offset
, int length
)
1591 struct vif_device
*vif
;
1598 len
+= sprintf(buffer
,
1599 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
1602 read_lock(&mrt_lock
);
1603 for (ct
=0;ct
<maxvif
;ct
++)
1605 char *name
= "none";
1610 name
= vif
->dev
->name
;
1611 size
= sprintf(buffer
+len
, "%2d %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1612 ct
, name
, vif
->bytes_in
, vif
->pkt_in
, vif
->bytes_out
, vif
->pkt_out
,
1613 vif
->flags
, vif
->local
, vif
->remote
);
1621 if(pos
>offset
+length
)
1624 read_unlock(&mrt_lock
);
1626 *start
=buffer
+(offset
-begin
);
1627 len
-=(offset
-begin
);
1635 static int ipmr_mfc_info(char *buffer
, char **start
, off_t offset
, int length
)
1637 struct mfc_cache
*mfc
;
1644 len
+= sprintf(buffer
,
1645 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
1648 read_lock(&mrt_lock
);
1649 for (ct
=0;ct
<MFC_LINES
;ct
++)
1651 for(mfc
=mfc_cache_array
[ct
]; mfc
; mfc
=mfc
->next
)
1656 * Interface forwarding map
1658 size
= sprintf(buffer
+len
, "%08lX %08lX %-3d %8ld %8ld %8ld",
1659 (unsigned long)mfc
->mfc_mcastgrp
,
1660 (unsigned long)mfc
->mfc_origin
,
1662 mfc
->mfc_un
.res
.pkt
,
1663 mfc
->mfc_un
.res
.bytes
,
1664 mfc
->mfc_un
.res
.wrong_if
);
1665 for(n
=mfc
->mfc_un
.res
.minvif
;n
<mfc
->mfc_un
.res
.maxvif
;n
++)
1667 if(VIF_EXISTS(n
) && mfc
->mfc_un
.res
.ttls
[n
] < 255)
1668 size
+= sprintf(buffer
+len
+size
, " %2d:%-3d", n
, mfc
->mfc_un
.res
.ttls
[n
]);
1670 size
+= sprintf(buffer
+len
+size
, "\n");
1678 if(pos
>offset
+length
)
1683 spin_lock_bh(&mfc_unres_lock
);
1684 for(mfc
=mfc_unres_queue
; mfc
; mfc
=mfc
->next
) {
1685 size
= sprintf(buffer
+len
, "%08lX %08lX %-3d %8ld %8ld %8ld\n",
1686 (unsigned long)mfc
->mfc_mcastgrp
,
1687 (unsigned long)mfc
->mfc_origin
,
1689 (long)mfc
->mfc_un
.unres
.unresolved
.qlen
,
1698 if(pos
>offset
+length
)
1701 spin_unlock_bh(&mfc_unres_lock
);
1704 read_unlock(&mrt_lock
);
1705 *start
=buffer
+(offset
-begin
);
1706 len
-=(offset
-begin
);
1717 #ifdef CONFIG_IP_PIMSM_V2
1718 struct inet_protocol pim_protocol
=
1720 pim_rcv
, /* PIM handler */
1721 NULL
, /* PIM error control */
1723 IPPROTO_PIM
, /* protocol ID */
1732 * Setup for IP multicast routing
1735 void __init
ip_mr_init(void)
1737 printk(KERN_INFO
"Linux IP multicast router 0.06 plus PIM-SM\n");
1738 mrt_cachep
= kmem_cache_create("ip_mrt_cache",
1739 sizeof(struct mfc_cache
),
1740 0, SLAB_HWCACHE_ALIGN
,
1742 init_timer(&ipmr_expire_timer
);
1743 ipmr_expire_timer
.function
=ipmr_expire_process
;
1744 register_netdevice_notifier(&ip_mr_notifier
);
1745 #ifdef CONFIG_PROC_FS
1746 proc_net_create("ip_mr_vif",0,ipmr_vif_info
);
1747 proc_net_create("ip_mr_cache",0,ipmr_mfc_info
);