Linux 2.2.0
[davej-history.git] / net / ipv4 / ipmr.c
blob99cda3ea0749996845b10816e5e11d4ca85bf3c2
1 /*
2 * IP multicast routing support for mrouted 3.6/3.8
4 * (c) 1995 Alan Cox, <alan@cymru.net>
5 * Linux Consultancy and Custom Driver Development
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
12 * Version: $Id: ipmr.c,v 1.38 1999/01/12 14:34:40 davem Exp $
14 * Fixes:
15 * Michael Chastain : Incorrect size of copying.
16 * Alan Cox : Added the cache manager code
17 * Alan Cox : Fixed the clone/copy bug and device race.
18 * Mike McLagan : Routing by source
19 * Malcolm Beattie : Buffer handling fixes.
20 * Alexey Kuznetsov : Double buffer free and other fixes.
21 * SVR Anand : Fixed several multicast bugs and problems.
22 * Alexey Kuznetsov : Status, optimisations and more.
23 * Brad Parker : Better behaviour on mrouted upcall
24 * overflow.
25 * Carlos Picoto : PIMv1 Support
29 #include <linux/config.h>
30 #include <asm/system.h>
31 #include <asm/uaccess.h>
32 #include <linux/types.h>
33 #include <linux/sched.h>
34 #include <linux/errno.h>
35 #include <linux/timer.h>
36 #include <linux/mm.h>
37 #include <linux/kernel.h>
38 #include <linux/fcntl.h>
39 #include <linux/stat.h>
40 #include <linux/socket.h>
41 #include <linux/in.h>
42 #include <linux/inet.h>
43 #include <linux/netdevice.h>
44 #include <linux/inetdevice.h>
45 #include <linux/igmp.h>
46 #include <linux/proc_fs.h>
47 #include <linux/mroute.h>
48 #include <linux/init.h>
49 #include <net/ip.h>
50 #include <net/protocol.h>
51 #include <linux/skbuff.h>
52 #include <net/sock.h>
53 #include <net/icmp.h>
54 #include <net/udp.h>
55 #include <net/raw.h>
56 #include <linux/notifier.h>
57 #include <linux/if_arp.h>
58 #include <linux/ip_fw.h>
59 #include <linux/firewall.h>
60 #include <net/ipip.h>
61 #include <net/checksum.h>
63 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
64 #define CONFIG_IP_PIMSM 1
65 #endif
68 * Multicast router control variables
/*
 * State of the multicast router.  vif_table holds one slot per virtual
 * interface; vifc_map is a bitmask of the slots in use and maxvif is
 * one past the highest slot ever used.  The forwarding cache is a
 * hash table (mfc_cache_array) of (origin, group) entries; entries
 * still waiting for mrouted to resolve them are counted in
 * cache_resolve_queue_len.
 */
71 static struct vif_device vif_table[MAXVIFS]; /* Devices */
72 static unsigned long vifc_map; /* Active device map */
73 static int maxvif;
74 int mroute_do_assert = 0; /* Set in PIM assert */
75 int mroute_do_pim = 0;
76 static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */
77 int cache_resolve_queue_len = 0; /* Size of unresolved */
/* Forward declarations for the routines defined further down. */
79 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
80 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
81 static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
83 extern struct inet_protocol pim_protocol;
/*
 * Create a "dvmrpN" IPIP tunnel device for a VIFF_TUNNEL vif by driving
 * the generic "tunl0" device's SIOCADDTUNNEL ioctl, then bring the new
 * device up with rp_filter disabled.  Returns the opened device or
 * NULL on failure.  Called from MRT_ADD_VIF handling.
 */
85 static
86 struct device *ipmr_new_tunnel(struct vifctl *v)
88 struct device *dev = NULL;
90 rtnl_lock();
91 dev = dev_get("tunl0");
93 if (dev) {
94 int err;
95 struct ifreq ifr;
96 mm_segment_t oldfs;
97 struct ip_tunnel_parm p;
98 struct in_device *in_dev;
/* Describe the tunnel endpoints for the ipip driver. */
100 memset(&p, 0, sizeof(p));
101 p.iph.daddr = v->vifc_rmt_addr.s_addr;
102 p.iph.saddr = v->vifc_lcl_addr.s_addr;
103 p.iph.version = 4;
104 p.iph.ihl = 5;
105 p.iph.protocol = IPPROTO_IPIP;
106 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
107 ifr.ifr_ifru.ifru_data = (void*)&p;
/* The tunnel ioctl copies from "user space"; lift the address-space
   check because &p is a kernel pointer. */
109 oldfs = get_fs(); set_fs(KERNEL_DS);
110 err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
111 set_fs(oldfs);
/* Look the freshly created device up by the name we asked for. */
113 if (err == 0 && (dev = dev_get(p.name)) != NULL) {
114 dev->flags |= IFF_MULTICAST;
116 in_dev = dev->ip_ptr;
117 if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
118 goto failure;
119 in_dev->cnf.rp_filter = 0;
121 if (dev_open(dev))
122 goto failure;
125 rtnl_unlock();
126 return dev;
128 failure:
129 unregister_netdevice(dev);
130 rtnl_unlock();
131 return NULL;
134 #ifdef CONFIG_IP_PIMSM
/* PIM-SM register vif: at most one exists; -1 / NULL while unused. */
136 static int reg_vif_num = -1;
137 static struct device * reg_dev;
139 static int reg_vif_xmit(struct sk_buff *skb, struct device *dev)
141 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
142 kfree_skb(skb);
143 return 0;
146 static struct net_device_stats *reg_vif_get_stats(struct device *dev)
148 return (struct net_device_stats*)dev->priv;
/*
 * Build and register the "pimreg" pseudo-device used for the PIM-SM
 * register vif.  One kmalloc holds the device structure, its stats
 * block and its name, in that order.  Returns the opened device or
 * NULL on failure (the allocation is freed on every error path).
 */
151 static
152 struct device *ipmr_reg_vif(struct vifctl *v)
154 struct device *dev;
155 struct in_device *in_dev;
156 int size;
/* Single allocation: device, then stats, then the name string. */
158 size = sizeof(*dev) + IFNAMSIZ + sizeof(struct net_device_stats);
159 dev = kmalloc(size, GFP_KERNEL);
160 if (!dev)
161 return NULL;
163 memset(dev, 0, size);
165 dev->priv = dev + 1;
166 dev->name = dev->priv + sizeof(struct net_device_stats);
168 strcpy(dev->name, "pimreg");
170 dev->type = ARPHRD_PIMREG;
/* MTU leaves room for the outer IP header plus the 8-byte PIM
   register header. */
171 dev->mtu = 1500 - sizeof(struct iphdr) - 8;
172 dev->flags = IFF_NOARP;
173 dev->hard_start_xmit = reg_vif_xmit;
174 dev->get_stats = reg_vif_get_stats;
176 rtnl_lock();
178 if (register_netdevice(dev)) {
179 rtnl_unlock();
180 kfree(dev);
181 return NULL;
183 dev->iflink = 0;
185 if ((in_dev = inetdev_init(dev)) == NULL)
186 goto failure;
188 in_dev->cnf.rp_filter = 0;
190 if (dev_open(dev))
191 goto failure;
193 rtnl_unlock();
194 reg_dev = dev;
195 return dev;
197 failure:
198 unregister_netdevice(dev);
199 rtnl_unlock();
200 kfree(dev);
201 return NULL;
203 #endif
206 * Delete a VIF entry
/*
 * Delete a VIF entry: clear its slot in vifc_map, back the device out
 * of multicast-forwarding mode, destroy tunnel/register pseudo-devices
 * we created, and shrink maxvif if the top slot was freed.
 * Returns 0, or -EADDRNOTAVAIL if the vif is not active.
 * Callers hold rtnl_lock (unregister_netdevice requires it).
 */
209 static int vif_delete(int vifi)
211 struct vif_device *v;
212 struct device *dev;
213 struct in_device *in_dev;
215 if (vifi < 0 || vifi >= maxvif || !(vifc_map&(1<<vifi)))
216 return -EADDRNOTAVAIL;
218 v = &vif_table[vifi];
220 dev = v->dev;
221 v->dev = NULL;
222 vifc_map &= ~(1<<vifi);
224 if ((in_dev = dev->ip_ptr) != NULL)
225 in_dev->cnf.mc_forwarding = 0;
/* Undo the allmulti reference MRT_ADD_VIF took and flush routes. */
227 dev_set_allmulti(dev, -1);
228 ip_rt_multicast_event(in_dev);
/* Tunnel and register devices were created by us — tear them down.
   Only the register device was kmalloc'ed directly, so only it is
   kfree'd here. */
230 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER)) {
231 #ifdef CONFIG_IP_PIMSM
232 if (vifi == reg_vif_num) {
233 reg_vif_num = -1;
234 reg_dev = NULL;
236 #endif
237 unregister_netdevice(dev);
238 if (v->flags&VIFF_REGISTER)
239 kfree(dev);
/* If the highest vif went away, scan down for the new maxvif. */
242 if (vifi+1 == maxvif) {
243 int tmp;
244 for (tmp=vifi-1; tmp>=0; tmp--) {
245 if (vifc_map&(1<<tmp))
246 break;
248 maxvif = tmp+1;
250 return 0;
253 static void ipmr_update_threshoulds(struct mfc_cache *cache, unsigned char *ttls)
255 int vifi;
257 start_bh_atomic();
259 cache->mfc_minvif = MAXVIFS;
260 cache->mfc_maxvif = 0;
261 memset(cache->mfc_ttls, 255, MAXVIFS);
263 for (vifi=0; vifi<maxvif; vifi++) {
264 if (vifc_map&(1<<vifi) && ttls[vifi] && ttls[vifi] < 255) {
265 cache->mfc_ttls[vifi] = ttls[vifi];
266 if (cache->mfc_minvif > vifi)
267 cache->mfc_minvif = vifi;
268 if (cache->mfc_maxvif <= vifi)
269 cache->mfc_maxvif = vifi + 1;
272 end_bh_atomic();
276 * Delete a multicast route cache entry
279 static void ipmr_cache_delete(struct mfc_cache *cache)
281 struct sk_buff *skb;
282 int line;
283 struct mfc_cache **cp;
286 * Find the right cache line
289 line=MFC_HASH(cache->mfc_mcastgrp,cache->mfc_origin);
290 cp=&(mfc_cache_array[line]);
292 if(cache->mfc_flags&MFC_QUEUED)
293 del_timer(&cache->mfc_timer);
296 * Unlink the buffer
299 while(*cp!=NULL)
301 if(*cp==cache)
303 *cp=cache->next;
304 break;
306 cp=&((*cp)->next);
310 * Free the buffer. If it is a pending resolution
311 * clean up the other resources.
314 if(cache->mfc_flags&MFC_QUEUED)
316 cache_resolve_queue_len--;
317 while((skb=skb_dequeue(&cache->mfc_unresolved))) {
318 #ifdef CONFIG_RTNETLINK
319 if (skb->nh.iph->version == 0) {
320 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
321 nlh->nlmsg_type = NLMSG_ERROR;
322 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
323 skb_trim(skb, nlh->nlmsg_len);
324 ((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
325 netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
326 } else
327 #endif
328 kfree_skb(skb);
331 kfree_s(cache,sizeof(cache));
335 * Cache expiry timer
/*
 * Expiry timer for an unresolved cache entry: mrouted never answered,
 * so throw the entry (and any packets queued on it) away.
 */
static void ipmr_cache_timer(unsigned long data)
{
	ipmr_cache_delete((struct mfc_cache *)data);
}
345 * Insert a multicast cache entry
348 static void ipmr_cache_insert(struct mfc_cache *c)
350 int line=MFC_HASH(c->mfc_mcastgrp,c->mfc_origin);
351 c->next=mfc_cache_array[line];
352 mfc_cache_array[line]=c;
356 * Find a multicast cache entry
359 struct mfc_cache *ipmr_cache_find(__u32 origin, __u32 mcastgrp)
361 int line=MFC_HASH(mcastgrp,origin);
362 struct mfc_cache *cache;
364 cache=mfc_cache_array[line];
365 while(cache!=NULL)
367 if(cache->mfc_origin==origin && cache->mfc_mcastgrp==mcastgrp)
368 return cache;
369 cache=cache->next;
371 return NULL;
375 * Allocate a multicast cache entry
378 static struct mfc_cache *ipmr_cache_alloc(int priority)
380 struct mfc_cache *c=(struct mfc_cache *)kmalloc(sizeof(struct mfc_cache), priority);
381 if(c==NULL)
382 return NULL;
383 memset(c, 0, sizeof(*c));
384 skb_queue_head_init(&c->mfc_unresolved);
385 init_timer(&c->mfc_timer);
386 c->mfc_timer.data=(long)c;
387 c->mfc_timer.function=ipmr_cache_timer;
388 c->mfc_minvif = MAXVIFS;
389 return c;
393 * A cache entry has gone into a resolved state from queued
396 static void ipmr_cache_resolve(struct mfc_cache *cache)
398 struct sk_buff *skb;
400 start_bh_atomic();
403 * Kill the queue entry timer.
406 del_timer(&cache->mfc_timer);
408 if (cache->mfc_flags&MFC_QUEUED) {
409 cache->mfc_flags&=~MFC_QUEUED;
410 cache_resolve_queue_len--;
413 end_bh_atomic();
416 * Play the pending entries through our router
418 while((skb=skb_dequeue(&cache->mfc_unresolved))) {
419 #ifdef CONFIG_RTNETLINK
420 if (skb->nh.iph->version == 0) {
421 int err;
422 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
424 if (ipmr_fill_mroute(skb, cache, NLMSG_DATA(nlh)) > 0) {
425 nlh->nlmsg_len = skb->tail - (u8*)nlh;
426 } else {
427 nlh->nlmsg_type = NLMSG_ERROR;
428 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
429 skb_trim(skb, nlh->nlmsg_len);
430 ((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE;
432 err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).pid, MSG_DONTWAIT);
433 } else
434 #endif
435 ip_mr_forward(skb, cache, 0);
440 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
441 * expects the following bizarre scheme..
/*
 * Send an upcall to mrouted on the mroute socket.  For IGMPMSG_NOCACHE
 * and IGMPMSG_WRONGVIF a small skb is built carrying the packet's IP
 * header recast as a struct igmpmsg plus a fake IGMP header; for
 * IGMPMSG_WHOLEPKT (PIM-SM register vif) the entire packet is copied
 * and an extra pseudo IP header is prepended.  mrouted relies on this
 * exact layout.  Returns 0 or a negative errno (queue full / no mem).
 */
444 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
446 struct sk_buff *skb;
447 int ihl = pkt->nh.iph->ihl<<2;
448 struct igmphdr *igmp;
449 struct igmpmsg *msg;
450 int ret;
452 #ifdef CONFIG_IP_PIMSM
453 if (assert == IGMPMSG_WHOLEPKT)
454 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
455 else
456 #endif
457 skb = alloc_skb(128, GFP_ATOMIC);
459 if(!skb)
460 return -ENOBUFS;
462 #ifdef CONFIG_IP_PIMSM
463 if (assert == IGMPMSG_WHOLEPKT) {
464 /* Ugly, but we have no choice with this interface.
465 Duplicate old header, fix ihl, length etc.
466 And all this only to mangle msg->im_msgtype and
467 to set msg->im_mbz to "mbz" :-)
469 msg = (struct igmpmsg*)skb_push(skb, sizeof(struct iphdr));
470 skb->nh.raw = skb->h.raw = (u8*)msg;
471 memcpy(msg, pkt->nh.raw, sizeof(struct iphdr));
472 msg->im_msgtype = IGMPMSG_WHOLEPKT;
473 msg->im_mbz = 0;
474 msg->im_vif = reg_vif_num;
475 skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
476 skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr));
477 } else
478 #endif
482 * Copy the IP header
485 skb->nh.iph = (struct iphdr *)skb_put(skb, ihl);
486 memcpy(skb->data,pkt->data,ihl);
/* protocol 0 doubles as the im_mbz field mrouted checks. */
487 skb->nh.iph->protocol = 0; /* Flag to the kernel this is a route add */
488 msg = (struct igmpmsg*)skb->nh.iph;
489 msg->im_vif = vifi;
490 skb->dst = dst_clone(pkt->dst);
493 * Add our header
496 igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
/* igmp->type and msg->im_msgtype overlay the same byte. */
497 igmp->type =
498 msg->im_msgtype = assert;
499 igmp->code = 0;
500 skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */
501 skb->h.raw = skb->nh.raw;
505 * Deliver to mrouted
507 if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
508 if (net_ratelimit())
509 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
510 kfree_skb(skb);
513 return ret;
517 * Queue a packet for resolution
/*
 * Queue a packet whose route is not yet known.  If no unresolved entry
 * exists for (saddr, daddr) one is created (subject to a global limit
 * of 10 pending entries), a 10 s expiry timer is started, and the
 * first packet triggers an IGMPMSG_NOCACHE upcall to mrouted.  At most
 * 4 packets are held per entry.  Consumes skb.  Returns 0 or -ENOBUFS.
 */
520 static int ipmr_cache_unresolved(struct mfc_cache *cache, vifi_t vifi, struct sk_buff *skb)
522 if(cache==NULL)
525 * Create a new entry if allowable
527 if(cache_resolve_queue_len>=10 || (cache=ipmr_cache_alloc(GFP_ATOMIC))==NULL)
529 kfree_skb(skb);
530 return -ENOBUFS;
533 * Fill in the new cache entry
/* ALL_VIFS marks the parent as unknown until mrouted answers. */
535 cache->mfc_parent=ALL_VIFS;
536 cache->mfc_origin=skb->nh.iph->saddr;
537 cache->mfc_mcastgrp=skb->nh.iph->daddr;
538 cache->mfc_flags=MFC_QUEUED;
540 * Link to the unresolved list
542 ipmr_cache_insert(cache);
543 cache_resolve_queue_len++;
545 * Fire off the expiry timer
547 cache->mfc_timer.expires=jiffies+10*HZ;
548 add_timer(&cache->mfc_timer);
550 * Reflect first query at mrouted.
552 if(mroute_socket)
554 /* If the report failed throw the cache entry
555 out - Brad Parker
557 OK, OK, Brad. Only do not forget to free skb
558 and return :-) --ANK
560 if (ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE)<0) {
561 ipmr_cache_delete(cache);
562 kfree_skb(skb);
563 return -ENOBUFS;
568 * See if we can append the packet
570 if(cache->mfc_queuelen>3)
572 kfree_skb(skb);
573 return -ENOBUFS;
575 cache->mfc_queuelen++;
576 skb_queue_tail(&cache->mfc_unresolved,skb);
577 return 0;
581 * MFC cache manipulation by user space mroute daemon
/*
 * Add (MRT_ADD_MFC) or delete (MRT_DEL_MFC) a forwarding-cache entry
 * on behalf of mrouted.  An add either updates an existing/queued
 * entry — replaying any packets that were waiting on it — or creates
 * a fresh resolved entry.  Runs with BHs off to keep net_bh out of
 * the cache while it is rewritten.
 */
584 int ipmr_mfc_modify(int action, struct mfcctl *mfc)
586 struct mfc_cache *cache;
588 if(!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
589 return -EINVAL;
591 * Find the cache line
594 start_bh_atomic();
596 cache=ipmr_cache_find(mfc->mfcc_origin.s_addr,mfc->mfcc_mcastgrp.s_addr);
599 * Delete an entry
601 if(action==MRT_DEL_MFC)
603 if(cache)
605 ipmr_cache_delete(cache);
606 end_bh_atomic();
607 return 0;
609 end_bh_atomic();
610 return -ENOENT;
612 if(cache)
616 * Update the cache, see if it frees a pending queue
619 cache->mfc_flags|=MFC_RESOLVED;
620 cache->mfc_parent=mfc->mfcc_parent;
621 ipmr_update_threshoulds(cache, mfc->mfcc_ttls);
624 * Check to see if we resolved a queued list. If so we
625 * need to send on the frames and tidy up.
628 if(cache->mfc_flags&MFC_QUEUED)
629 ipmr_cache_resolve(cache); /* Unhook & send the frames */
630 end_bh_atomic();
631 return 0;
635 * Unsolicited update - that's ok, add anyway.
639 cache=ipmr_cache_alloc(GFP_ATOMIC);
640 if(cache==NULL)
642 end_bh_atomic();
643 return -ENOMEM;
645 cache->mfc_flags=MFC_RESOLVED;
646 cache->mfc_origin=mfc->mfcc_origin.s_addr;
647 cache->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
648 cache->mfc_parent=mfc->mfcc_parent;
649 ipmr_update_threshoulds(cache, mfc->mfcc_ttls);
650 ipmr_cache_insert(cache);
651 end_bh_atomic();
652 return 0;
655 static void mrtsock_destruct(struct sock *sk)
657 if (sk == mroute_socket) {
658 ipv4_devconf.mc_forwarding = 0;
659 mroute_socket=NULL;
660 mroute_close(sk);
665 * Socket options and virtual interface manipulation. The whole
666 * virtual interface system is a complete heap, but unfortunately
667 * that's how BSD mrouted happens to think. Maybe one day with a proper
668 * MOSPF/PIM router set up we can clean this up.
/*
 * Socket-option interface used by mrouted: MRT_INIT claims the control
 * socket, MRT_ADD/DEL_VIF manage virtual interfaces, MRT_ADD/DEL_MFC
 * manage the forwarding cache, MRT_ASSERT and MRT_PIM control PIM
 * behaviour.  Every option except MRT_INIT is refused unless issued
 * on the registered mroute socket.
 */
671 int ip_mroute_setsockopt(struct sock *sk,int optname,char *optval,int optlen)
673 struct vifctl vif;
674 struct mfcctl mfc;
676 if(optname!=MRT_INIT)
678 if(sk!=mroute_socket)
679 return -EACCES;
682 switch(optname)
684 case MRT_INIT:
/* Only a raw IGMP socket may become the mroute socket, and only one
   may exist at a time. */
685 if(sk->type!=SOCK_RAW || sk->num!=IPPROTO_IGMP)
686 return -EOPNOTSUPP;
687 if(optlen!=sizeof(int))
688 return -ENOPROTOOPT;
690 int opt;
691 if (get_user(opt,(int *)optval))
692 return -EFAULT;
693 if (opt != 1)
694 return -ENOPROTOOPT;
696 if(mroute_socket)
697 return -EADDRINUSE;
698 mroute_socket=sk;
699 ipv4_devconf.mc_forwarding = 1;
/* Register the router-alert handler; its destructor undoes all of
   the above when the socket dies. */
700 if (ip_ra_control(sk, 1, mrtsock_destruct) == 0)
701 return 0;
702 mrtsock_destruct(sk);
703 return -EADDRINUSE;
704 case MRT_DONE:
705 return ip_ra_control(sk, 0, NULL);
706 case MRT_ADD_VIF:
707 case MRT_DEL_VIF:
708 if(optlen!=sizeof(vif))
709 return -EINVAL;
710 if (copy_from_user(&vif,optval,sizeof(vif)))
711 return -EFAULT;
712 if(vif.vifc_vifi >= MAXVIFS)
713 return -ENFILE;
714 if(optname==MRT_ADD_VIF)
716 struct vif_device *v=&vif_table[vif.vifc_vifi];
717 struct device *dev;
718 struct in_device *in_dev;
720 /* Is vif busy ? */
721 if (vifc_map&(1<<vif.vifc_vifi))
722 return -EADDRINUSE;
/* Pick or create the underlying device for this vif type. */
724 switch (vif.vifc_flags) {
725 #ifdef CONFIG_IP_PIMSM
726 case VIFF_REGISTER:
729 * Special Purpose VIF in PIM
730 * All the packets will be sent to the daemon
732 if (reg_vif_num >= 0)
733 return -EADDRINUSE;
734 reg_vif_num = vif.vifc_vifi;
735 dev = ipmr_reg_vif(&vif);
736 if (!dev) {
737 reg_vif_num = -1;
738 return -ENOBUFS;
740 break;
741 #endif
742 case VIFF_TUNNEL:
743 dev = ipmr_new_tunnel(&vif);
744 if (!dev)
745 return -ENOBUFS;
746 break;
747 case 0:
748 dev=ip_dev_find(vif.vifc_lcl_addr.s_addr);
749 if (!dev)
750 return -EADDRNOTAVAIL;
751 break;
752 default:
753 #if 0
754 printk(KERN_DEBUG "ipmr_add_vif: flags %02x\n", vif.vifc_flags);
755 #endif
756 return -EINVAL;
759 if ((in_dev = dev->ip_ptr) == NULL)
760 return -EADDRNOTAVAIL;
761 if (in_dev->cnf.mc_forwarding)
762 return -EADDRINUSE;
763 in_dev->cnf.mc_forwarding = 1;
764 dev_set_allmulti(dev, +1);
765 ip_rt_multicast_event(in_dev);
768 * Fill in the VIF structures
/* BHs off so net_bh never sees a half-filled vif slot. */
770 start_bh_atomic();
771 v->rate_limit=vif.vifc_rate_limit;
772 v->local=vif.vifc_lcl_addr.s_addr;
773 v->remote=vif.vifc_rmt_addr.s_addr;
774 v->flags=vif.vifc_flags;
775 v->threshold=vif.vifc_threshold;
776 v->dev=dev;
777 v->bytes_in = 0;
778 v->bytes_out = 0;
779 v->pkt_in = 0;
780 v->pkt_out = 0;
781 v->link = dev->ifindex;
782 if (vif.vifc_flags&(VIFF_TUNNEL|VIFF_REGISTER))
783 v->link = dev->iflink;
784 vifc_map|=(1<<vif.vifc_vifi);
785 if (vif.vifc_vifi+1 > maxvif)
786 maxvif = vif.vifc_vifi+1;
787 end_bh_atomic();
788 return 0;
789 } else {
790 int ret;
791 rtnl_lock();
792 ret = vif_delete(vif.vifc_vifi);
793 rtnl_unlock();
794 return ret;
798 * Manipulate the forwarding caches. These live
799 * in a sort of kernel/user symbiosis.
801 case MRT_ADD_MFC:
802 case MRT_DEL_MFC:
803 if(optlen!=sizeof(mfc))
804 return -EINVAL;
805 if (copy_from_user(&mfc,optval, sizeof(mfc)))
806 return -EFAULT;
807 return ipmr_mfc_modify(optname, &mfc);
809 * Control PIM assert.
811 case MRT_ASSERT:
813 int v;
814 if(get_user(v,(int *)optval))
815 return -EFAULT;
816 mroute_do_assert=(v)?1:0;
817 return 0;
819 #ifdef CONFIG_IP_PIMSM
820 case MRT_PIM:
822 int v;
823 if(get_user(v,(int *)optval))
824 return -EFAULT;
825 v = (v)?1:0;
826 if (v != mroute_do_pim) {
827 mroute_do_pim = v;
828 mroute_do_assert = v;
829 #ifdef CONFIG_IP_PIMSM_V2
830 if (mroute_do_pim)
831 inet_add_protocol(&pim_protocol);
832 else
833 inet_del_protocol(&pim_protocol);
834 #endif
836 return 0;
838 #endif
840 * Spurious command, or MRT_VERSION which you cannot
841 * set.
843 default:
844 return -ENOPROTOOPT;
849 * Getsock opt support for the multicast routing system.
852 int ip_mroute_getsockopt(struct sock *sk,int optname,char *optval,int *optlen)
854 int olr;
855 int val;
857 if(sk!=mroute_socket)
858 return -EACCES;
859 if(optname!=MRT_VERSION &&
860 #ifdef CONFIG_IP_PIMSM
861 optname!=MRT_PIM &&
862 #endif
863 optname!=MRT_ASSERT)
864 return -ENOPROTOOPT;
866 if(get_user(olr, optlen))
867 return -EFAULT;
869 olr=min(olr,sizeof(int));
870 if(put_user(olr,optlen))
871 return -EFAULT;
872 if(optname==MRT_VERSION)
873 val=0x0305;
874 #ifdef CONFIG_IP_PIMSM
875 else if(optname==MRT_PIM)
876 val=mroute_do_pim;
877 #endif
878 else
879 val=mroute_do_assert;
880 if(copy_to_user(optval,&val,olr))
881 return -EFAULT;
882 return 0;
886 * The IP multicast ioctl support routines.
/*
 * ioctl interface for reading multicast routing statistics:
 * SIOCGETVIFCNT returns packet/byte counters for one vif,
 * SIOCGETSGCNT returns counters for one (source, group) cache entry.
 * arg points at the user's request structure, updated in place.
 */
889 int ipmr_ioctl(struct sock *sk, int cmd, unsigned long arg)
891 struct sioc_sg_req sr;
892 struct sioc_vif_req vr;
893 struct vif_device *vif;
894 struct mfc_cache *c;
896 switch(cmd)
898 case SIOCGETVIFCNT:
899 if (copy_from_user(&vr,(void *)arg,sizeof(vr)))
900 return -EFAULT;
901 if(vr.vifi>=maxvif)
902 return -EINVAL;
903 vif=&vif_table[vr.vifi];
904 if(vifc_map&(1<<vr.vifi))
906 vr.icount=vif->pkt_in;
907 vr.ocount=vif->pkt_out;
908 vr.ibytes=vif->bytes_in;
909 vr.obytes=vif->bytes_out;
910 if (copy_to_user((void *)arg,&vr,sizeof(vr)))
911 return -EFAULT;
912 return 0;
/* Vif slot exists but is not active. */
914 return -EADDRNOTAVAIL;
915 case SIOCGETSGCNT:
916 if (copy_from_user(&sr,(void *)arg,sizeof(sr)))
917 return -EFAULT;
/* Walk the hash chain for this (group, source) pair. */
918 for (c = mfc_cache_array[MFC_HASH(sr.grp.s_addr, sr.src.s_addr)];
919 c; c = c->next) {
920 if (sr.grp.s_addr == c->mfc_mcastgrp &&
921 sr.src.s_addr == c->mfc_origin) {
922 sr.pktcnt = c->mfc_pkt;
923 sr.bytecnt = c->mfc_bytes;
924 sr.wrong_if = c->mfc_wrong_if;
925 if (copy_to_user((void *)arg,&sr,sizeof(sr)))
926 return -EFAULT;
927 return 0;
930 return -EADDRNOTAVAIL;
931 default:
932 return -ENOIOCTLCMD;
937 * Close the multicast socket, and clear the vif tables etc
940 void mroute_close(struct sock *sk)
942 int i;
945 * Shut down all active vif entries
947 rtnl_lock();
948 for(i=0; i<maxvif; i++)
949 vif_delete(i);
950 rtnl_unlock();
953 * Wipe the cache
955 for(i=0;i<MFC_LINES;i++)
957 start_bh_atomic();
958 while(mfc_cache_array[i]!=NULL)
959 ipmr_cache_delete(mfc_cache_array[i]);
960 end_bh_atomic();
964 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
966 struct vif_device *v;
967 int ct;
968 if (event != NETDEV_UNREGISTER)
969 return NOTIFY_DONE;
970 v=&vif_table[0];
971 for(ct=0;ct<maxvif;ct++) {
972 if (vifc_map&(1<<ct) && v->dev==ptr)
973 vif_delete(ct);
974 v++;
976 return NOTIFY_DONE;
980 static struct notifier_block ip_mr_notifier={
981 ipmr_device_event,
982 NULL,
987 * Encapsulate a packet by attaching a valid IPIP header to it.
988 * This avoids tunnel drivers and other mess and gives us the speed so
989 * important for multicast video.
992 static void ip_encap(struct sk_buff *skb, u32 saddr, u32 daddr)
994 struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));
996 iph->version = 4;
997 iph->tos = skb->nh.iph->tos;
998 iph->ttl = skb->nh.iph->ttl;
999 iph->frag_off = 0;
1000 iph->daddr = daddr;
1001 iph->saddr = saddr;
1002 iph->protocol = IPPROTO_IPIP;
1003 iph->ihl = 5;
1004 iph->tot_len = htons(skb->len);
1005 iph->id = htons(ip_id_count++);
1006 ip_send_check(iph);
1008 skb->h.ipiph = skb->nh.iph;
1009 skb->nh.iph = iph;
1013 * Processing handlers for ipmr_forward
/*
 * Transmit one copy of skb on vif number vifi.  Register vifs hand the
 * packet to mrouted instead of sending it; tunnel vifs route to the
 * tunnel endpoint and get an IPIP header via ip_encap; plain vifs
 * route by the packet's destination.  "last" means this is the final
 * copy, so the original skb may be consumed instead of cloned.
 * Packets bigger than the path MTU are silently dropped (multicast
 * cannot be fragmented here and no ICMP can be sent).
 */
1016 static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c,
1017 int vifi, int last)
1019 struct iphdr *iph = skb->nh.iph;
1020 struct vif_device *vif = &vif_table[vifi];
1021 struct device *dev;
1022 struct rtable *rt;
1023 int encap = 0;
1024 struct sk_buff *skb2;
1026 #ifdef CONFIG_IP_PIMSM
1027 if (vif->flags & VIFF_REGISTER) {
1028 vif->pkt_out++;
1029 vif->bytes_out+=skb->len;
1030 ((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len;
1031 ((struct net_device_stats*)vif->dev->priv)->tx_packets++;
1032 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
1033 return;
1035 #endif
1037 if (vif->flags&VIFF_TUNNEL) {
1038 if (ip_route_output(&rt, vif->remote, vif->local, RT_TOS(iph->tos), vif->link))
1039 return;
/* Leave room for the IPIP header ip_encap will prepend. */
1040 encap = sizeof(struct iphdr);
1041 } else {
1042 if (ip_route_output(&rt, iph->daddr, 0, RT_TOS(iph->tos), vif->link))
1043 return;
1046 dev = rt->u.dst.dev;
1048 if (skb->len+encap > rt->u.dst.pmtu /* && (ntohs(iph->frag_off) & IP_DF) */) {
1049 /* Do not fragment multicasts. Alas, IPv4 does not
1050 allow to send ICMP, so that packets will disappear
1051 to blackhole.
1054 ip_statistics.IpFragFails++;
1055 ip_rt_put(rt);
1056 return;
1059 encap += dev->hard_header_len;
/* Choose the cheapest way to get a private copy: reallocate when
   headroom is short or more copies will follow, clone when shared,
   otherwise just take an extra reference on the original. */
1061 if (skb_headroom(skb) < encap || skb_cloned(skb) || !last)
1062 skb2 = skb_realloc_headroom(skb, (encap + 15)&~15);
1063 else if (atomic_read(&skb->users) != 1)
1064 skb2 = skb_clone(skb, GFP_ATOMIC);
1065 else {
1066 atomic_inc(&skb->users);
1067 skb2 = skb;
1070 if (skb2 == NULL) {
1071 ip_rt_put(rt);
1072 return;
1075 vif->pkt_out++;
1076 vif->bytes_out+=skb->len;
1078 dst_release(skb2->dst);
1079 skb2->dst = &rt->u.dst;
1080 iph = skb2->nh.iph;
1081 ip_decrease_ttl(iph);
1083 #ifdef CONFIG_FIREWALL
1084 if (call_fw_firewall(PF_INET, vif->dev, skb2->nh.iph, NULL, &skb2) < FW_ACCEPT) {
1085 kfree_skb(skb2);
1086 return;
1088 if (call_out_firewall(PF_INET, vif->dev, skb2->nh.iph, NULL, &skb2) < FW_ACCEPT) {
1089 kfree_skb(skb2);
1090 return;
1092 #endif
1093 if (vif->flags & VIFF_TUNNEL) {
1094 ip_encap(skb2, vif->local, vif->remote);
1095 #ifdef CONFIG_FIREWALL
1096 /* Double output firewalling on tunnels: one is on tunnel
1097 another one is on real device.
1099 if (call_out_firewall(PF_INET, dev, skb2->nh.iph, NULL, &skb2) < FW_ACCEPT) {
1100 kfree_skb(skb2);
1101 return;
1103 #endif
1104 ((struct ip_tunnel *)vif->dev->priv)->stat.tx_packets++;
1105 ((struct ip_tunnel *)vif->dev->priv)->stat.tx_bytes+=skb2->len;
/* Mark the copy so ip_mr_input won't forward it a second time if it
   is looped back to us. */
1108 IPCB(skb2)->flags |= IPSKB_FORWARDED;
1112 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1113 * not only before forwarding, but after forwarding on all output
1114 * interfaces. It is clear, if mrouter runs a multicasting
1115 * program, it should receive packets not depending to what interface
1116 * program is joined.
1117 * If we will not make it, the program will have to join on all
1118 * interfaces. On the other hand, multihoming host (or router, but
1119 * not mrouter) cannot join to more than one interface - it will
1120 * result in receiving multiple packets.
1122 skb2->dst->output(skb2);
1125 int ipmr_find_vif(struct device *dev)
1127 int ct;
1128 for (ct=0; ct<maxvif; ct++) {
1129 if (vifc_map&(1<<ct) && vif_table[ct].dev == dev)
1130 return ct;
1132 return ALL_VIFS;
1135 /* "local" means that we should preserve one skb (for local delivery) */
/*
 * Forward a packet according to its resolved cache entry: verify it
 * arrived on the entry's parent vif (else optionally raise a PIM
 * WRONGVIF assert and drop), then send one copy on every vif whose
 * TTL threshold the packet passes.  "local" means one reference to
 * skb is kept for local delivery, so the skb must not be consumed.
 */
1137 int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
1139 int psend = -1;
1140 int vif, ct;
1142 vif = cache->mfc_parent;
1143 cache->mfc_pkt++;
1144 cache->mfc_bytes += skb->len;
1147 * Wrong interface: drop packet and (maybe) send PIM assert.
1149 if (vif_table[vif].dev != skb->dev) {
1150 int true_vifi;
/* iif == 0 means a locally originated, looped-back packet. */
1152 if (((struct rtable*)skb->dst)->key.iif == 0) {
1153 /* It is our own packet, looped back.
1154 Very complicated situation...
1156 The best workaround until routing daemons will be
1157 fixed is not to redistribute packet, if it was
1158 send through wrong interface. It means, that
1159 multicast applications WILL NOT work for
1160 (S,G), which have default multicast route pointing
1161 to wrong oif. In any case, it is not a good
1162 idea to use multicasting applications on router.
1164 goto dont_forward;
1167 cache->mfc_wrong_if++;
1168 true_vifi = ipmr_find_vif(skb->dev);
/* Asserts are rate-limited per cache entry. */
1170 if (true_vifi < MAXVIFS && mroute_do_assert &&
1171 /* pimsm uses asserts, when switching from RPT to SPT,
1172 so that we cannot check that packet arrived on an oif.
1173 It is bad, but otherwise we would need to move pretty
1174 large chunk of pimd to kernel. Ough... --ANK
1176 (mroute_do_pim || cache->mfc_ttls[true_vifi] < 255) &&
1177 jiffies - cache->mfc_last_assert > MFC_ASSERT_THRESH) {
1178 cache->mfc_last_assert = jiffies;
1179 ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
1181 goto dont_forward;
1184 vif_table[vif].pkt_in++;
1185 vif_table[vif].bytes_in+=skb->len;
1188 * Forward the frame
/* Walk the candidate vifs high-to-low; psend lags one behind so the
   final transmit can consume the original skb (unless "local"). */
1190 for (ct = cache->mfc_maxvif-1; ct >= cache->mfc_minvif; ct--) {
1191 if (skb->nh.iph->ttl > cache->mfc_ttls[ct]) {
1192 if (psend != -1)
1193 ipmr_queue_xmit(skb, cache, psend, 0);
1194 psend=ct;
1197 if (psend != -1)
1198 ipmr_queue_xmit(skb, cache, psend, !local);
1200 dont_forward:
1201 if (!local)
1202 kfree_skb(skb);
1203 return 0;
1208 * Multicast packets for forwarding arrive here
/*
 * Entry point for multicast packets that may need forwarding.  Handles
 * router-alert/IGMP delivery to mrouted, queues packets with no
 * resolved cache entry via ipmr_cache_unresolved, forwards resolved
 * ones, and performs local delivery when the route says the packet is
 * also for us.
 */
1211 int ip_mr_input(struct sk_buff *skb)
1213 struct mfc_cache *cache;
1214 int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL;
1216 /* Packet is looped back after forward, it should not be
1217 forwarded second time, but still can be delivered locally.
1219 if (IPCB(skb)->flags&IPSKB_FORWARDED)
1220 goto dont_forward;
1222 if (!local) {
1223 if (IPCB(skb)->opt.router_alert) {
1224 if (ip_call_ra_chain(skb))
1225 return 0;
1226 } else if (skb->nh.iph->protocol == IPPROTO_IGMP && mroute_socket) {
1227 /* IGMPv1 (and broken IGMPv2 implementations sort of
1228 Cisco IOS <= 11.2(8)) do not put router alert
1229 option to IGMP packets destined to routable
1230 groups. It is very bad, because it means
1231 that we can forward NO IGMP messages.
1233 raw_rcv(mroute_socket, skb);
1234 return 0;
1238 cache = ipmr_cache_find(skb->nh.iph->saddr, skb->nh.iph->daddr);
1241 * No usable cache entry
1244 if (cache==NULL || (cache->mfc_flags&MFC_QUEUED)) {
1245 int vif;
/* Deliver a private copy locally first; the original goes on to be
   queued for resolution. */
1247 if (local) {
1248 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1249 ip_local_deliver(skb);
1250 if (skb2 == NULL)
1251 return -ENOBUFS;
1252 skb = skb2;
1255 vif = ipmr_find_vif(skb->dev);
1256 if (vif != ALL_VIFS) {
1257 ipmr_cache_unresolved(cache, vif, skb);
1258 return -EAGAIN;
/* Arrived on a device that is not one of our vifs: drop. */
1260 kfree_skb(skb);
1261 return 0;
1264 ip_mr_forward(skb, cache, local);
1266 if (local)
1267 return ip_local_deliver(skb);
1268 return 0;
1270 dont_forward:
1271 if (local)
1272 return ip_local_deliver(skb);
1273 kfree_skb(skb);
1274 return 0;
1277 #ifdef CONFIG_IP_PIMSM_V1
1279 * Handle IGMP messages of PIMv1
/*
 * Receive a PIMv1 REGISTER carried inside an IGMP message: validate
 * it, strip the outer headers, and re-inject the encapsulated
 * multicast packet as if it had arrived on the register pseudo-device.
 * Consumes skb in all cases; returns 0 or -EINVAL.
 */
1282 int pim_rcv_v1(struct sk_buff * skb, unsigned short len)
1284 struct igmphdr *pim = (struct igmphdr*)skb->h.raw;
1285 struct iphdr *encap;
1287 if (!mroute_do_pim ||
1288 len < sizeof(*pim) + sizeof(*encap) ||
1289 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER ||
1290 reg_dev == NULL) {
1291 kfree_skb(skb);
1292 return -EINVAL;
1295 encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
1297 Check that:
1298 a. packet is really destinted to a multicast group
1299 b. packet is not a NULL-REGISTER
1300 c. packet is not truncated
1302 if (!MULTICAST(encap->daddr) ||
1303 ntohs(encap->tot_len) == 0 ||
1304 ntohs(encap->tot_len) + sizeof(*pim) > len) {
1305 kfree_skb(skb);
1306 return -EINVAL;
/* Rewrite the skb so it looks freshly received on reg_dev and feed
   it back into the stack. */
1308 skb->mac.raw = skb->nh.raw;
1309 skb_pull(skb, (u8*)encap - skb->data);
1310 skb->nh.iph = (struct iphdr *)skb->data;
1311 skb->dev = reg_dev;
1312 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1313 skb->protocol = __constant_htons(ETH_P_IP);
1314 skb->ip_summed = 0;
1315 skb->pkt_type = PACKET_HOST;
1316 dst_release(skb->dst);
1317 skb->dst = NULL;
1318 ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len;
1319 ((struct net_device_stats*)reg_dev->priv)->rx_packets++;
1320 netif_rx(skb);
1321 return 0;
1323 #endif
1325 #ifdef CONFIG_IP_PIMSM_V2
/*
 * Receive a PIMv2 REGISTER message: verify type, non-NULL-register
 * flag and checksum, then strip the outer headers and re-inject the
 * encapsulated multicast packet as if received on the register
 * pseudo-device.  Consumes skb; returns 0 or -EINVAL.
 */
1326 int pim_rcv(struct sk_buff * skb, unsigned short len)
1328 struct pimreghdr *pim = (struct pimreghdr*)skb->h.raw;
1329 struct iphdr *encap;
1331 if (len < sizeof(*pim) + sizeof(*encap) ||
1332 pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
1333 (pim->flags&PIM_NULL_REGISTER) ||
1334 reg_dev == NULL ||
1335 ip_compute_csum((void *)pim, len)) {
1336 kfree_skb(skb);
1337 return -EINVAL;
1340 /* check if the inner packet is destined to mcast group */
1341 encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
1342 if (!MULTICAST(encap->daddr) ||
1343 ntohs(encap->tot_len) == 0 ||
1344 ntohs(encap->tot_len) + sizeof(*pim) > len) {
1345 kfree_skb(skb);
1346 return -EINVAL;
/* Rewrite the skb so it looks freshly received on reg_dev and feed
   it back into the stack. */
1348 skb->mac.raw = skb->nh.raw;
1349 skb_pull(skb, (u8*)encap - skb->data);
1350 skb->nh.iph = (struct iphdr *)skb->data;
1351 skb->dev = reg_dev;
1352 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1353 skb->protocol = __constant_htons(ETH_P_IP);
1354 skb->ip_summed = 0;
1355 skb->pkt_type = PACKET_HOST;
1356 dst_release(skb->dst);
1357 ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len;
1358 ((struct net_device_stats*)reg_dev->priv)->rx_packets++;
1359 skb->dst = NULL;
1360 netif_rx(skb);
1361 return 0;
1363 #endif
1365 #ifdef CONFIG_RTNETLINK
/*
 * Append the multicast route described by cache entry c to a netlink
 * message under construction in skb: the input interface as RTA_IIF
 * and each forwarding vif as an RTA_MULTIPATH nexthop whose hop count
 * carries the TTL threshold.  Returns 1 on success, -EMSGSIZE if skb
 * runs out of tailroom (the message is trimmed back to its old end).
 */
1367 static int
1368 ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1370 int ct;
1371 struct rtnexthop *nhp;
1372 struct device *dev = vif_table[c->mfc_parent].dev;
/* Remember the old tail so a failed fill can be undone. */
1373 u8 *b = skb->tail;
1374 struct rtattr *mp_head;
1376 if (dev)
1377 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1379 mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));
1381 for (ct = c->mfc_minvif; ct < c->mfc_maxvif; ct++) {
1382 if (c->mfc_ttls[ct] < 255) {
1383 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1384 goto rtattr_failure;
1385 nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1386 nhp->rtnh_flags = 0;
1387 nhp->rtnh_hops = c->mfc_ttls[ct];
1388 nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
1389 nhp->rtnh_len = sizeof(*nhp);
/* Patch the multipath attribute length now the nexthops are in. */
1392 mp_head->rta_type = RTA_MULTIPATH;
1393 mp_head->rta_len = skb->tail - (u8*)mp_head;
1394 rtm->rtm_type = RTN_MULTICAST;
1395 return 1;
1397 rtattr_failure:
1398 skb_trim(skb, b - skb->data);
1399 return -EMSGSIZE;
/*
 *	Resolve a multicast route for an rtnetlink route query.
 *
 *	Looks up the (src, dst) pair of the route attached to 'skb' in
 *	the multicast forwarding cache.  If the entry is resolved, fills
 *	the reply via ipmr_fill_mroute().  If it is missing or still
 *	queued for resolution, either returns -EAGAIN (nowait) or builds
 *	a fake IP header on the skb and queues it as an unresolved-cache
 *	upcall to the multicast routing daemon.
 *
 *	Returns 1 on success, -EAGAIN / -ENODEV / upcall error otherwise.
 *	The cache is walked under start_bh_atomic() to exclude net bh.
 */
int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct rtable *rt = (struct rtable*)skb->dst;

	start_bh_atomic();
	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
	if (cache==NULL || (cache->mfc_flags&MFC_QUEUED)) {
		struct device *dev;
		int vif;
		int err;

		if (nowait) {
			end_bh_atomic();
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ipmr_find_vif(dev)) == ALL_VIFS) {
			end_bh_atomic();
			return -ENODEV;
		}
		/*
		 *	Fabricate a minimal IP header in front of the skb so
		 *	it can be queued as an unresolved-route upcall.
		 *	version == 0 marks it as a locally generated query.
		 */
		skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
		skb->nh.iph->ihl = sizeof(struct iphdr)>>2;
		skb->nh.iph->saddr = rt->rt_src;
		skb->nh.iph->daddr = rt->rt_dst;
		skb->nh.iph->version = 0;
		err = ipmr_cache_unresolved(cache, vif, skb);
		end_bh_atomic();
		return err;
	}

	/* Resolved cache entry is not changed by net bh,
	   so that we are allowed to enable it.
	 */
	end_bh_atomic();

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	return ipmr_fill_mroute(skb, cache, rtm);
}
#endif
/*
 *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
 */
1448 int ipmr_vif_info(char *buffer, char **start, off_t offset, int length, int dummy)
1450 struct vif_device *vif;
1451 int len=0;
1452 off_t pos=0;
1453 off_t begin=0;
1454 int size;
1455 int ct;
1457 len += sprintf(buffer,
1458 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
1459 pos=len;
1461 for (ct=0;ct<maxvif;ct++)
1463 char *name = "none";
1464 vif=&vif_table[ct];
1465 if(!(vifc_map&(1<<ct)))
1466 continue;
1467 if (vif->dev)
1468 name = vif->dev->name;
1469 size = sprintf(buffer+len, "%2d %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1470 ct, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out,
1471 vif->flags, vif->local, vif->remote);
1472 len+=size;
1473 pos+=size;
1474 if(pos<offset)
1476 len=0;
1477 begin=pos;
1479 if(pos>offset+length)
1480 break;
1483 *start=buffer+(offset-begin);
1484 len-=(offset-begin);
1485 if(len>length)
1486 len=length;
1487 return len;
/*
 *	/proc/net/ip_mr_cache read handler: dump the multicast forwarding
 *	cache, one line per entry, followed by the per-VIF TTL map.
 *
 *	Standard old-style /proc get_info contract: fill 'buffer',
 *	honour 'offset'/'length' windowing via *start, return the number
 *	of valid bytes.  'dummy' is unused.
 *
 *	Each hash chain is walked under start_bh_atomic() so net bh
 *	cannot modify the chain while we traverse it.
 */
int ipmr_mfc_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	struct mfc_cache *mfc;
	int len=0;
	off_t pos=0;
	off_t begin=0;
	int size;
	int ct;

	len += sprintf(buffer,
		 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
	pos=len;

	for (ct=0;ct<MFC_LINES;ct++)
	{
		start_bh_atomic();
		mfc=mfc_cache_array[ct];
		while(mfc!=NULL)
		{
			int n;

			/*
			 *	Interface forwarding map
			 */
			/* For a still-unresolved entry show the queue length
			   in the packet-count column instead. */
			size = sprintf(buffer+len, "%08lX %08lX %-3d %8ld %8ld %8ld",
				(unsigned long)mfc->mfc_mcastgrp,
				(unsigned long)mfc->mfc_origin,
				mfc->mfc_parent == ALL_VIFS ? -1 : mfc->mfc_parent,
				(mfc->mfc_flags & MFC_QUEUED) ? mfc->mfc_unresolved.qlen : mfc->mfc_pkt,
				mfc->mfc_bytes,
				mfc->mfc_wrong_if);
			for(n=mfc->mfc_minvif;n<mfc->mfc_maxvif;n++)
			{
				/* TTL 255 marks a VIF this entry does not use. */
				if(vifc_map&(1<<n) && mfc->mfc_ttls[n] < 255)
					size += sprintf(buffer+len+size, " %2d:%-3d", n, mfc->mfc_ttls[n]);
			}
			size += sprintf(buffer+len+size, "\n");
			len+=size;
			pos+=size;
			/* Discard output entirely before the requested offset. */
			if(pos<offset)
			{
				len=0;
				begin=pos;
			}
			/* Window filled: drop the bh lock before leaving. */
			if(pos>offset+length)
			{
				end_bh_atomic();
				goto done;
			}
			mfc=mfc->next;
		}
		end_bh_atomic();
	}
done:
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	if (len < 0) {
		len = 0;	/* offset beyond end of data: report EOF */
	}
	return len;
}
#ifdef CONFIG_PROC_FS
/*
 *	/proc/net entries for the multicast router.  The integer after
 *	the id is the name length ("ip_mr_vif" = 9, "ip_mr_cache" = 11);
 *	the last field is the get_info read handler.
 */
static struct proc_dir_entry proc_net_ipmr_vif = {
	PROC_NET_IPMR_VIF, 9 ,"ip_mr_vif",
	S_IFREG | S_IRUGO, 1, 0, 0,
	0, &proc_net_inode_operations,
	ipmr_vif_info
};
static struct proc_dir_entry proc_net_ipmr_mfc = {
	PROC_NET_IPMR_MFC, 11 ,"ip_mr_cache",
	S_IFREG | S_IRUGO, 1, 0, 0,
	0, &proc_net_inode_operations,
	ipmr_mfc_info
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
/*
 *	inet protocol handler for IPPROTO_PIM; registered so pim_rcv()
 *	receives PIMv2 Register messages.
 */
struct inet_protocol pim_protocol =
{
	pim_rcv,		/* PIM handler		*/
	NULL,			/* PIM error control	*/
	NULL,			/* next			*/
	IPPROTO_PIM,		/* protocol ID		*/
	0,			/* copy			*/
	NULL,			/* data			*/
	"PIM"			/* name			*/
};
#endif
1584 * Setup for IP multicast routing
1587 __initfunc(void ip_mr_init(void))
1589 printk(KERN_INFO "Linux IP multicast router 0.06 plus PIM-SM\n");
1590 register_netdevice_notifier(&ip_mr_notifier);
1591 #ifdef CONFIG_PROC_FS
1592 proc_net_register(&proc_net_ipmr_vif);
1593 proc_net_register(&proc_net_ipmr_mfc);
1594 #endif