[SK_BUFF]: Introduce skb_reset_transport_header(skb)
net/ipv4/ipmr.c
/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@redhat.com>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif
static struct sock *mroute_socket;

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct vif_device vif_table[MAXVIFS];		/* Devices		*/
static int maxvif;

#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)

static int mroute_do_assert;				/* Set in PIM assert	*/
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES];	/* Forwarding cache	*/

static struct mfc_cache *mfc_unres_queue;		/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;		/* Size of unresolved	*/

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   In this case the data path is entirely free of exclusive locks.
 */
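/*
 * The resulting pattern, sketched here for orientation (both variants
 * appear throughout this file):
 *
 *	read_lock(&mrt_lock);			// data path lookups
 *	c = ipmr_cache_find(saddr, daddr);
 *	...
 *	read_unlock(&mrt_lock);
 *
 *	write_lock_bh(&mrt_lock);		// process-context updates
 *	vif_table[vifi].dev = dev;
 *	write_unlock_bh(&mrt_lock);
 *
 * Only the unresolved queue, which is touched from the data path and the
 * expiry timer as well, needs the stronger mfc_unres_lock.
 */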
static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static
struct net_device *ipmr_new_tunnel(struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name("tunl0");

	if (dev) {
		int err;
		struct ifreq ifr;
		mm_segment_t oldfs;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (void*)&p;

		oldfs = get_fs(); set_fs(KERNEL_DS);
		err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
		set_fs(oldfs);

		dev = NULL;

		if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
				goto failure;
			in_dev->cnf.rp_filter = 0;

			if (dev_open(dev))
				goto failure;
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
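/*
 * Note on the get_fs()/set_fs(KERNEL_DS) dance above: SIOCADDTUNNEL is
 * normally issued from userspace with a user pointer in ifr_ifru.ifru_data.
 * Here we call the tunnel driver's do_ioctl() directly with a kernel
 * pointer (&p), so the address-space limit is widened for the duration of
 * the call and restored immediately afterwards.
 */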
#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(dev))->tx_packets++;
	ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
{
	return (struct net_device_stats*)netdev_priv(dev);
}

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->hard_start_xmit	= reg_vif_xmit;
	dev->get_stats		= reg_vif_get_stats;
	dev->destructor		= free_netdev;
}

static struct net_device *ipmr_reg_vif(void)
{
	struct net_device *dev;
	struct in_device *in_dev;

	dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg",
			   reg_vif_setup);

	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if ((in_dev = inetdev_init(dev)) == NULL)
		goto failure;

	in_dev->cnf.rp_filter = 0;

	if (dev_open(dev))
		goto failure;

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
/*
 *	Delete a VIF entry
 */

static int vif_delete(int vifi)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi+1 == maxvif) {
		int tmp;
		for (tmp=vifi-1; tmp>=0; tmp--) {
			if (VIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		in_dev->cnf.mc_forwarding--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}
/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)
{
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, **cp;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len) == 0)
		goto out;

	now = jiffies;
	expires = 10*HZ;
	cp = &mfc_unres_queue;

	while ((c=*cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;

		ipmr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi=0; vifi<maxvif; vifi++) {
		if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
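/*
 * Worked example: with maxvif == 4 and ttls == {0, 2, 0, 4}, only vifs 1
 * and 3 carry a usable threshold, so the loop above leaves
 * res.minvif == 1, res.maxvif == 4, res.ttls[1] == 2 and res.ttls[3] == 4;
 * vifs 0 and 2 stay at 255 and are skipped by the forwarding loop in
 * ip_mr_forward().
 */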
static int vif_add(struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;

	/* Is vif busy ? */
	if (VIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(vifc);
		if (!dev)
			return -ENOBUFS;
		break;
	case 0:
		dev = ip_dev_find(vifc->vifc_lcl_addr.s_addr);
		if (!dev)
			return -EADDRNOTAVAIL;
		dev_put(dev);
		break;
	default:
		return -EINVAL;
	}

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
		return -EADDRNOTAVAIL;
	in_dev->cnf.mc_forwarding++;
	dev_set_allmulti(dev, +1);
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit=vifc->vifc_rate_limit;
	v->local=vifc->vifc_lcl_addr.s_addr;
	v->remote=vifc->vifc_rmt_addr.s_addr;
	v->flags=vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold=vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	dev_hold(dev);
	v->dev=dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags&VIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi+1 > maxvif)
		maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
{
	int line=MFC_HASH(mcastgrp,origin);
	struct mfc_cache *c;

	for (c=mfc_cache_array[line]; c; c = c->next) {
		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c==NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c==NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
	return c;
}
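/*
 * The two allocators above reflect the two lifetimes an MFC entry can
 * have: ipmr_cache_alloc() builds a resolved entry from process context
 * (GFP_KERNEL), while ipmr_cache_alloc_unres() is called from the packet
 * path (GFP_ATOMIC) and arms a 10 second expiry for entries still
 * waiting on an answer from mrouted.
 */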
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb->tail - (u8*)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, NETLINK_CB(skb).pid);
		} else
			ip_mr_forward(skb, c, 0);
	}
}
/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

	/*
	 *	Copy the IP header
	 */

	skb->nh.raw = skb_put(skb, ihl);
	memcpy(skb->data,pkt->data,ihl);
	ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	msg->im_vif = vifi;
	skb->dst = dst_clone(pkt->dst);

	/*
	 *	Add our header
	 */

	igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
	igmp->type	=
	msg->im_msgtype = assert;
	igmp->code	=	0;
	ip_hdr(skb)->tot_len = htons(skb->len);		/* Fix the length */
	skb->h.raw = skb->nh.raw;
	}

	if (mroute_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to mrouted
	 */
	if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
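/*
 * The daemon end of this upcall is a plain read on the raw IGMP socket
 * that issued MRT_INIT. A minimal userspace sketch (illustrative only,
 * not part of this file):
 *
 *	char buf[2048];
 *	ssize_t n = read(mrt_fd, buf, sizeof(buf));
 *	struct igmpmsg *m = (struct igmpmsg *)buf;
 *	if (n > 0 && m->im_mbz == 0) {
 *		switch (m->im_msgtype) {
 *		case IGMPMSG_NOCACHE:	// add an MFC entry for (im_src, im_dst)
 *		case IGMPMSG_WRONGVIF:	// PIM assert processing
 *		case IGMPMSG_WHOLEPKT:	// PIM register encapsulation
 *			break;
 *		}
 *	}
 *
 * im_mbz overlays the IP protocol field, which the code above forces to
 * zero; genuine IGMP traffic arriving on the same raw socket carries
 * IPPROTO_IGMP there, which is how the daemon tells the two apart.
 */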
/*
 *	Queue a packet for resolution. It gets a locked cache entry!
 */

static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len)>=10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen>3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c=ipmr_cache_alloc();
	if (c==NULL)
		return -ENOMEM;

	c->mfc_origin=mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent=mfc->mfcc_parent;
	ipmr_update_thresholds(c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0;i<MFC_LINES;i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == mroute_socket) {
		ipv4_devconf.mc_forwarding--;

		write_lock_bh(&mrt_lock);
		mroute_socket=NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	if (optname != MRT_INIT) {
		if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen!=sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (mroute_socket) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			write_lock_bh(&mrt_lock);
			mroute_socket=sk;
			write_unlock_bh(&mrt_lock);

			ipv4_devconf.mc_forwarding++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk!=mroute_socket)
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen!=sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif,optval,sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname==MRT_ADD_VIF) {
			ret = vif_add(&vif, sk==mroute_socket);
		} else {
			ret = vif_delete(vif.vifc_vifi);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen!=sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc,optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname==MRT_DEL_MFC)
			ret = ipmr_mfc_delete(&mfc);
		else
			ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		mroute_do_assert=(v)?1:0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v, ret;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		v = (v)?1:0;
		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
			if (mroute_do_pim)
				ret = inet_add_protocol(&pim_protocol,
							IPPROTO_PIM);
			else
				ret = inet_del_protocol(&pim_protocol,
							IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
#endif
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
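/*
 * Typical daemon bring-up against this interface (illustrative sketch,
 * error handling omitted; local_if_addr, source_addr and group_addr are
 * assumed variables):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	vc.vifc_lcl_addr.s_addr = local_if_addr;
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 *	struct mfcctl mc = { .mfcc_parent = 0 };
 *	mc.mfcc_origin.s_addr   = source_addr;
 *	mc.mfcc_mcastgrp.s_addr = group_addr;
 *	mc.mfcc_ttls[1] = 1;		// forward to vif 1
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * Closing the socket (or MRT_DONE) runs mrtsock_destruct(), which clears
 * all non-static vifs and cache entries via mroute_clean_tables().
 */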
/*
 *	Getsockopt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
{
	int olr;
	int val;

	if (optname!=MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname!=MRT_PIM &&
#endif
	    optname!=MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr,optlen))
		return -EFAULT;
	if (optname==MRT_VERSION)
		val=0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname==MRT_PIM)
		val=mroute_do_pim;
#endif
	else
		val=mroute_do_assert;
	if (copy_to_user(optval,&val,olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr,arg,sizeof(vr)))
			return -EFAULT;
		if (vr.vifi>=maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif=&vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi)) {
			vr.icount=vif->pkt_in;
			vr.ocount=vif->pkt_out;
			vr.ibytes=vif->bytes_in;
			vr.obytes=vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&vr,sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr,arg,sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&sr,sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct vif_device *v;
	int ct;
	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v=&vif_table[0];
	for (ct=0;ct<maxvif;ct++,v++) {
		if (v->dev==ptr)
			vif_delete(ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier={
	.notifier_call = ipmr_device_event,
};
/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->h.raw = skb->nh.raw;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
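/*
 * After ip_encap() the frame is simply:
 *
 *	| outer IP (proto IPPROTO_IPIP) | original IP header | payload |
 *
 * The outer header copies tos and ttl from the inner one and is never
 * fragmented here (frag_off = 0); the MTU check in ipmr_queue_xmit()
 * adds sizeof(struct iphdr) to skb->len before deciding whether the
 * packet fits on the tunnel route.
 */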
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options * opt	= &(IPCB(skb)->opt);

	IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}
/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out+=skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow us to send ICMP here, so such packets will
		   just disappear into a black hole.
		 */

		IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out+=skb->len;

	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++;
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on. If we did not do this, the program would
	 * have to join on all interfaces. On the other hand, a multihomed
	 * host (or a router, but not an mrouter) cannot join on more than
	 * one interface - it would result in receiving multiple packets.
	 */
	NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}
static int ipmr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct=maxvif-1; ct>=0; ct--) {
		if (vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (((struct rtable*)skb->dst)->fl.iif == 0) {
			/* It is our own packet, looped back.
			   Very complicated situation...

			   The best workaround until routing daemons are
			   fixed is not to redistribute a packet if it was
			   sent through the wrong interface. This means that
			   multicast applications WILL NOT work for (S,G)
			   entries whose default multicast route points to
			   the wrong oif. In any case, it is not a good idea
			   to run multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		       so we cannot check that the packet arrived on an oif.
		       That is bad, but otherwise we would need to move a
		       pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	vif_table[vif].pkt_in++;
	vif_table[vif].bytes_in+=skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ipmr_queue_xmit(skb2, cache, psend);
			}
			psend=ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				ipmr_queue_xmit(skb2, cache, psend);
		} else {
			ipmr_queue_xmit(skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}
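/*
 * Forwarding example: an entry whose res.ttls has [2] = 1 and [5] = 3
 * duplicates a packet with IP TTL 4 onto vifs 2 and 5 (4 > 1 and 4 > 3),
 * while a packet with TTL 2 only leaves on vif 2. The loop above walks
 * the vifs from the top down and keeps one eligible vif in psend, so in
 * the non-local case the last candidate reuses the original skb and the
 * common single-oif case involves no clone at all.
 */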
/*
 *	Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL;

	/* The packet was looped back after forwarding; it must not be
	   forwarded a second time, but it can still be delivered locally.
	 */
	if (IPCB(skb)->flags&IPSKB_FORWARDED)
		goto dont_forward;

	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
			/* IGMPv1 (and broken IGMPv2 implementations such as
			   Cisco IOS <= 11.2(8)) do not put the router alert
			   option into IGMP packets destined to routable
			   groups. This is very bad, because it means
			   that we can forward NO IGMP messages.
			 */
			read_lock(&mrt_lock);
			if (mroute_socket) {
				nf_reset(skb);
				raw_rcv(mroute_socket, skb);
				read_unlock(&mrt_lock);
				return 0;
			}
			read_unlock(&mrt_lock);
		}
	}

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache==NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL) {
				read_unlock(&mrt_lock);
				return -ENOBUFS;
			}
			skb = skb2;
		}

		vif = ipmr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ipmr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip_mr_forward(skb, cache, local);

	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff * skb)
{
	struct igmphdr *pim;
	struct iphdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct igmphdr*)skb->h.raw;

	if (!mroute_do_pim ||
	    skb->len < sizeof(*pim) + sizeof(*encap) ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
	/*
	   Check that:
	   a. packet is really destined to a multicast group
	   b. packet is not a NULL-REGISTER
	   c. packet is not truncated
	 */
	if (!MULTICAST(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac.raw = skb->nh.raw;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff * skb)
{
	struct pimreghdr *pim;
	struct iphdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr*)skb->h.raw;
	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
	    (pim->flags&PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
	if (!MULTICAST(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac.raw = skb->nh.raw;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
	skb->dst = NULL;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif
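/*
 * Both PIM receive paths above do the same de-encapsulation: strip the
 * outer IP plus PIM register (or PIMv1 IGMP) header, point the network
 * header at the inner multicast packet, and feed it back through
 * netif_rx() as if it had arrived on the pimreg device, where
 * ip_mr_input() picks it up for normal forwarding.
 */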
static int
ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif_table[c->mfc_parent].dev;
	u8 *b = skb->tail;
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb->tail - (u8*)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -EMSGSIZE;
}
int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc_cache *cache;
	struct rtable *rt = (struct rtable*)skb->dst;

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);

	if (cache==NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		iph->version = 0;
		err = ipmr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);
		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = ipmr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
 */
struct ipmr_vif_iter {
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		return &vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
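/*
 * Sample /proc/net/ip_mr_vif output produced by the format above
 * (values illustrative):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0           1200      10      3400      28 00000 0A000001 00000000
 */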
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;

	rc = seq_open(file, &ipmr_vif_seq_ops);
	if (rc)
		goto out_kfree;

	s->ct = 0;
	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

struct ipmr_mfc_iter {
	struct mfc_cache **cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc_cache *mfc;

	it->cache = mfc_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
		for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc_cache_array);

	while (++it->ct < MFC_LINES) {
		mfc = mfc_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc_cache_array)
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
			   (unsigned long) mfc->mfc_mcastgrp,
			   (unsigned long) mfc->mfc_origin,
			   mfc->mfc_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(n)
				    && mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;

	rc = seq_open(file, &ipmr_mfc_seq_ops);
	if (rc)
		goto out_kfree;

	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
};
#endif


/*
 *	Setup for IP multicast routing
 */

void __init ip_mr_init(void)
{
	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
				       NULL, NULL);
	init_timer(&ipmr_expire_timer);
	ipmr_expire_timer.function=ipmr_expire_process;
	register_netdevice_notifier(&ip_mr_notifier);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
	proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);
#endif
}