usr.sbin/mrouted/prune.c
1 /*
2 * The mrouted program is covered by the license in the accompanying file
3 * named "LICENSE". Use of the mrouted program represents acceptance of
4 * the terms and conditions listed in that file.
6 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
7 * Leland Stanford Junior University.
10 * prune.c,v 3.8.4.59 1998/03/01 02:06:32 fenner Exp
12 * $FreeBSD: src/usr.sbin/mrouted/prune.c,v 1.17.2.1 2000/10/29 03:59:57 kris Exp $
13 * $DragonFly: src/usr.sbin/mrouted/prune.c,v 1.6 2008/04/20 13:44:26 swildner Exp $
16 #include "defs.h"
18 extern int cache_lifetime;
19 extern int prune_lifetime;
20 extern struct rtentry *routing_table;
22 extern int phys_vif;
24 extern int allow_black_holes;
27 * Randomize the value to obtain one between 0.5x and 1.5x,
28 * in order to prevent synchronization.
30 #ifdef SYSV
31 #define JITTERED_VALUE(x) ((x)/2 + (lrand48() % (x)))
32 #else
33 #define JITTERED_VALUE(x) ((x)/2 + (arc4random() % (x)))
34 #endif
35 #define CACHE_LIFETIME(x) JITTERED_VALUE(x) /* XXX */
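/*
 * A worked example (assuming a cache_lifetime of 300 seconds, a value
 * chosen purely for illustration): CACHE_LIFETIME(300) evaluates to
 * 150 + (random % 300), i.e. a value in [150, 449] -- roughly 0.5x to
 * 1.5x of the nominal lifetime, as described above.
 */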
37 struct gtable *kernel_table; /* ptr to list of kernel grp entries*/
38 static struct gtable *kernel_no_route; /* list of grp entries w/o routes */
39 struct gtable *gtp; /* pointer for kernel rt entries */
40 unsigned int kroutes; /* current number of cache entries */
42 /****************************************************************************
43 Functions that are local to prune.c
44 ****************************************************************************/
45 static int scoped_addr(vifi_t vifi, u_int32 addr);
46 static void prun_add_ttls(struct gtable *gt);
47 static int pruning_neighbor(vifi_t vifi, u_int32 addr);
48 static int can_mtrace(vifi_t vifi, u_int32 addr);
49 static struct ptable * find_prune_entry(u_int32 vr, struct ptable *pt);
50 static void remove_sources(struct gtable *gt);
51 static void rexmit_prune(void *arg);
52 static void expire_prune(vifi_t vifi, struct gtable *gt);
53 static void send_prune(struct gtable *gt);
54 static void send_graft(struct gtable *gt);
55 static void send_graft_ack(u_int32 src, u_int32 dst,
56 u_int32 origin, u_int32 grp,
57 vifi_t vifi);
58 static void update_kernel(struct gtable *g);
60 /*
61 * Updates the ttl values for each vif.
63 static void
64 prun_add_ttls(struct gtable *gt)
66 struct uvif *v;
67 vifi_t vifi;
69 for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
70 if (VIFM_ISSET(vifi, gt->gt_grpmems))
71 gt->gt_ttls[vifi] = v->uv_threshold;
72 else
73 gt->gt_ttls[vifi] = 0;
78 * Checks for scoped multicast addresses.
79 * XXX The allow_black_holes check should be conditional on ALLOW_BLACK_HOLES,
80 * but macros are not functions.
82 #define GET_SCOPE(gt) { \
83 vifi_t _i; \
85 VIFM_CLRALL((gt)->gt_scope); \
86 if (allow_black_holes || \
87 (ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
88 for (_i = 0; _i < numvifs; _i++) \
89 if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
90 VIFM_SET(_i, (gt)->gt_scope); \
91 } \
92 if ((gt)->gt_route == NULL || ((gt)->gt_route->rt_parent != NO_VIF && \
93 VIFM_ISSET((gt)->gt_route->rt_parent, (gt)->gt_scope))) \
94 VIFM_SETALL((gt)->gt_scope);
96 #define APPLY_SCOPE(gt) VIFM_CLR_MASK((gt)->gt_grpmems, (gt)->gt_scope)
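/*
 * Note: the 0xef000000 test in GET_SCOPE() matches groups in 239.0.0.0/8,
 * the administratively scoped multicast block, so the per-vif boundary
 * ACLs (uv_acl) are normally consulted only for such groups, or for every
 * group when allow_black_holes is set.  APPLY_SCOPE() then strips the
 * scoped vifs out of the computed forwarding set.
 */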
98 #define GET_MEMBERSHIP(gt, vifi) { \
99 if ((gt)->gt_route && \
100 VIFM_ISSET((vifi), (gt)->gt_route->rt_children) && \
101 (!SUBS_ARE_PRUNED((gt)->gt_route->rt_subordinates, \
102 uvifs[vifi].uv_nbrmap, (gt)->gt_prunes) || \
103 grplst_mem((vifi), (gt)->gt_mcastgrp))) \
104 VIFM_SET((vifi), (gt)->gt_grpmems); \
107 static int
108 scoped_addr(vifi_t vifi, u_int32 addr)
110 struct vif_acl *acl;
112 for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
113 if ((addr & acl->acl_mask) == acl->acl_addr)
114 return 1;
116 return 0;
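/*
 * For example (a hypothetical configuration): a vif with a scope boundary
 * of 239.192.0.0/16 makes scoped_addr() return 1 for a group such as
 * 239.192.1.1, since (addr & acl_mask) == acl_addr for that entry.
 */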
120 * Determine the list of outgoing vifs, based upon
121 * route subordinates, prunes received, and group
122 * memberships.
124 void
125 determine_forwvifs(struct gtable *gt)
127 vifi_t i;
129 VIFM_CLRALL(gt->gt_grpmems);
130 for (i = 0; i < numvifs; i++) {
131 GET_MEMBERSHIP(gt, i);
133 GET_SCOPE(gt);
134 APPLY_SCOPE(gt);
138 * Send a prune or a graft if necessary.
140 void
141 send_prune_or_graft(struct gtable *gt)
143 if (VIFM_ISEMPTY(gt->gt_grpmems))
144 send_prune(gt);
145 else if (gt->gt_prsent_timer)
146 send_graft(gt);
150 * Determine if mcastgrp has a listener on vifi
153 grplst_mem(vifi_t vifi, u_int32 mcastgrp)
155 struct listaddr *g;
156 struct uvif *v;
158 v = &uvifs[vifi];
160 for (g = v->uv_groups; g != NULL; g = g->al_next)
161 if (mcastgrp == g->al_addr)
162 return 1;
164 return 0;
168 * Finds the group entry with the specified source and netmask.
169 * If netmask is 0, it uses the route's netmask.
171 * Returns TRUE if a match was found; the global variable gtp is left
172 * pointing to the entry before (i.e. the predecessor of) the found entry.
173 * Returns FALSE if no exact match was found; gtp is left pointing to the
174 * entry after which the entry in question belongs, or is NULL if it
175 * belongs at the head of the list.
178 find_src_grp(u_int32 src, u_int32 mask, u_int32 grp)
180 struct gtable *gt;
182 gtp = NULL;
183 gt = kernel_table;
184 while (gt != NULL) {
185 if (grp == gt->gt_mcastgrp &&
186 (mask ? (gt->gt_route->rt_origin == src &&
187 gt->gt_route->rt_originmask == mask) :
188 ((src & gt->gt_route->rt_originmask) ==
189 gt->gt_route->rt_origin)))
190 return TRUE;
191 if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
192 (grp == gt->gt_mcastgrp &&
193 (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
194 (mask == gt->gt_route->rt_originmask &&
195 (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
196 gtp = gt;
197 gt = gt->gt_gnext;
199 else break;
201 return FALSE;
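/*
 * Typical caller pattern (as in add_table_entry(), accept_prune() and
 * accept_graft() below): after a successful lookup the matched entry is
 * recovered with
 *
 *	g = gtp ? gtp->gt_gnext : kernel_table;
 *
 * because gtp points at the predecessor of the match, or is NULL when
 * the match sits at the head of kernel_table.
 */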
205 * Check if the neighbor supports pruning
207 static int
208 pruning_neighbor(vifi_t vifi, u_int32 addr)
210 struct listaddr *n = neighbor_info(vifi, addr);
211 int vers;
213 if (n == NULL)
214 return 0;
216 vers = NBR_VERS(n);
217 return (vers >= 0x0300 && ((vers & 0xff00) != 0x0a00));
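/*
 * NBR_VERS() packs the neighbor's major version in the high byte and its
 * minor version in the low byte, so the test above accepts neighbors
 * reporting version 3.0 or later while excluding major version 10 --
 * presumably an implementation whose version numbering does not imply
 * prune support.
 */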
221 * Can the neighbor in question handle multicast traceroute?
223 static int
224 can_mtrace(vifi_t vifi, u_int32 addr)
226 struct listaddr *n = neighbor_info(vifi, addr);
227 int vers;
229 if (n == NULL)
230 return 1; /* fail "safe" */
232 vers = NBR_VERS(n);
233 return (vers >= 0x0303 && ((vers & 0xff00) != 0x0a00));
237 * Returns the prune entry of the router, or NULL if none exists
239 static struct ptable *
240 find_prune_entry(u_int32 vr, struct ptable *pt)
242 while (pt) {
243 if (pt->pt_router == vr)
244 return pt;
245 pt = pt->pt_next;
248 return NULL;
252 * Remove all the sources hanging off the group table entry from the kernel
253 * cache. Remember the packet counts wherever possible, to keep the mtrace
254 * counters consistent. This prepares for possible prune retransmission,
255 * either on a multi-access network or when a prune that we sent upstream
256 * has expired.
258 static void
259 remove_sources(struct gtable *gt)
261 struct stable *st;
262 struct sioc_sg_req sg_req;
264 sg_req.grp.s_addr = gt->gt_mcastgrp;
267 * call k_del_rg() on every one of the gt->gt_srctbl entries
268 * but first save the packet count so that the mtrace packet
269 * counters can remain approximately correct. There's a race
270 * here but it's minor.
272 for (st = gt->gt_srctbl; st; st = st->st_next) {
273 if (st->st_ctime == 0)
274 continue;
275 IF_DEBUG(DEBUG_PRUNE)
276 log(LOG_DEBUG, 0, "rexmit_prune deleting (%s %s) (next is %d sec)",
277 inet_fmt(st->st_origin, s1),
278 inet_fmt(gt->gt_mcastgrp, s2),
279 gt->gt_prune_rexmit);
280 sg_req.src.s_addr = st->st_origin;
281 if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
282 sg_req.pktcnt = 0;
284 k_del_rg(st->st_origin, gt);
285 st->st_ctime = 0; /* flag that it's not in the kernel any more */
286 st->st_savpkt += sg_req.pktcnt;
287 kroutes--;
291 * Now, add_table_entry will prune when asked to add a cache entry.
296 * Prepare for possible prune retransmission
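 * The argument is the heap-allocated (struct gtable **) that send_prune()
 * passed to timer_setTimer(); it is freed here, and the entry's
 * gt_rexmit_timer handle is cleared before the membership check.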
298 static void
299 rexmit_prune(void *arg)
301 struct gtable *gt = *(struct gtable **)arg;
303 free(arg);
305 gt->gt_rexmit_timer = 0;
307 /* Make sure we're still not forwarding traffic */
308 if (!VIFM_ISEMPTY(gt->gt_grpmems)) {
309 IF_DEBUG(DEBUG_PRUNE)
310 log(LOG_DEBUG, 0, "rexmit_prune (%s %s): gm:%x",
311 RT_FMT(gt->gt_route, s1), inet_fmt(gt->gt_mcastgrp, s2),
312 gt->gt_grpmems);
313 return;
316 remove_sources(gt);
320 * Send a prune message to the dominant router for
321 * this source.
323 * Record an entry that a prune was sent for this group
325 static void
326 send_prune(struct gtable *gt)
328 struct ptable *pt;
329 char *p;
330 int i;
331 int datalen;
332 u_int32 dst;
333 u_int32 tmp;
334 int rexmitting = 0;
335 struct uvif *v;
338 * Can't process a prune if we don't have an associated route
339 * or if the route points to a local interface.
341 if (gt->gt_route == NULL || gt->gt_route->rt_parent == NO_VIF ||
342 gt->gt_route->rt_gateway == 0)
343 return;
345 /* Don't send a prune to a non-pruning router */
346 if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
347 return;
349 v = &uvifs[gt->gt_route->rt_parent];
351 * Send a prune message to the upstream router.
353 #if 0
354 dst = v->uv_flags & VIFF_TUNNEL ? dvmrp_group : gt->gt_route->rt_gateway; /*XXX*/
355 #else
356 dst = gt->gt_route->rt_gateway;
357 #endif
359 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
360 datalen = 0;
363 * determine prune lifetime, if this isn't a retransmission.
365 * Use interface-specified lifetime if there is one.
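 * The lifetime is then jittered and clamped to the smallest remaining
 * downstream prune timer, so the prune sent upstream expires no later
 * than the earliest prune we are currently holding from a downstream
 * router.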
367 if (gt->gt_prsent_timer == 0) {
368 int l = prune_lifetime;
370 if (v->uv_prune_lifetime != 0)
371 l = v->uv_prune_lifetime;
373 gt->gt_prsent_timer = JITTERED_VALUE(l);
374 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
375 if (pt->pt_timer < gt->gt_prsent_timer)
376 gt->gt_prsent_timer = pt->pt_timer;
377 } else if (gt->gt_prsent_timer < 0) {
378 IF_DEBUG(DEBUG_PRUNE)
379 log(LOG_DEBUG, 0, "asked to rexmit? (%s,%s)/%d on vif %d to %s with negative time",
380 RT_FMT(gt->gt_route, s1), inet_fmt(gt->gt_mcastgrp, s2),
381 gt->gt_prsent_timer, gt->gt_route->rt_parent,
382 inet_fmt(gt->gt_route->rt_gateway, s3));
383 return;
384 } else
385 rexmitting = 1;
387 if (rexmitting && !(v->uv_flags & VIFF_REXMIT_PRUNES)) {
388 IF_DEBUG(DEBUG_PRUNE)
389 log(LOG_DEBUG, 0, "not rexmitting prune for (%s %s)/%d on vif %d to %s",
390 RT_FMT(gt->gt_route, s1), inet_fmt(gt->gt_mcastgrp, s2),
391 gt->gt_prsent_timer, gt->gt_route->rt_parent,
392 inet_fmt(gt->gt_route->rt_gateway, s3));
393 return;
395 if (gt->gt_prsent_timer <= MIN_PRUNE_LIFE) {
396 IF_DEBUG(DEBUG_PRUNE)
397 log(LOG_DEBUG, 0, "not bothering to send prune for (%s,%s)/%d on vif %d to %s because it's too short",
398 RT_FMT(gt->gt_route, s1), inet_fmt(gt->gt_mcastgrp, s2),
399 gt->gt_prsent_timer, gt->gt_route->rt_parent,
400 inet_fmt(gt->gt_route->rt_gateway, s3));
401 return;
405 * If we have a graft pending, cancel graft retransmission
407 gt->gt_grftsnt = 0;
409 for (i = 0; i < 4; i++)
410 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
411 for (i = 0; i < 4; i++)
412 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
413 tmp = htonl(gt->gt_prsent_timer);
414 for (i = 0; i < 4; i++)
415 *p++ = ((char *)&(tmp))[i];
416 datalen += 12;
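/*
 * The DVMRP prune payload built above is 12 bytes: the route origin
 * (4 bytes), the group (4 bytes) and the prune lifetime in seconds
 * (4 bytes, in network byte order).
 */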
418 send_on_vif(v, dst, DVMRP_PRUNE, datalen);
420 IF_DEBUG(DEBUG_PRUNE)
421 log(LOG_DEBUG, 0, "%s prune for (%s %s)/%d on vif %d to %s",
422 rexmitting ? "rexmitted" : "sent",
423 RT_FMT(gt->gt_route, s1), inet_fmt(gt->gt_mcastgrp, s2),
424 gt->gt_prsent_timer, gt->gt_route->rt_parent,
425 inet_fmt(gt->gt_route->rt_gateway, s3));
427 if ((v->uv_flags & VIFF_REXMIT_PRUNES) &&
428 gt->gt_rexmit_timer == 0 &&
429 gt->gt_prsent_timer > gt->gt_prune_rexmit) {
430 struct gtable **arg =
431 (struct gtable **)malloc(sizeof (struct gtable **));
433 *arg = gt;
434 gt->gt_rexmit_timer = timer_setTimer(
435 JITTERED_VALUE(gt->gt_prune_rexmit),
436 rexmit_prune, arg);
437 gt->gt_prune_rexmit *= 2;
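/*
 * Illustrative schedule: gt_prune_rexmit starts at PRUNE_REXMIT_VAL and
 * doubles after each (jittered) retransmission, so successive
 * retransmissions are spaced roughly 1x, 2x, 4x, ... PRUNE_REXMIT_VAL
 * apart, and stop once the interval would exceed the remaining prune
 * lifetime (the gt_prsent_timer > gt_prune_rexmit test above).
 */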
442 * A prune was sent upstream, so a graft has to be
443 * sent to annul the prune.
444 * Set up a graft timer so that if an ack is not
445 * heard within that time, another graft request
446 * is sent out.
448 static void
449 send_graft(struct gtable *gt)
451 char *p;
452 int i;
453 int datalen;
454 u_int32 dst;
456 /* Can't send a graft without an associated route */
457 if (gt->gt_route == NULL || gt->gt_route->rt_parent == NO_VIF) {
458 gt->gt_grftsnt = 0;
459 return;
462 gt->gt_prsent_timer = 0;
463 gt->gt_prune_rexmit = PRUNE_REXMIT_VAL;
464 if (gt->gt_rexmit_timer)
465 timer_clearTimer(gt->gt_rexmit_timer);
467 if (gt->gt_grftsnt == 0)
468 gt->gt_grftsnt = 1;
470 #if 0
471 dst = uvifs[gt->gt_route->rt_parent].uv_flags & VIFF_TUNNEL ? dvmrp_group : gt->gt_route->rt_gateway; /*XXX*/
472 #else
473 dst = gt->gt_route->rt_gateway;
474 #endif
476 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
477 datalen = 0;
479 for (i = 0; i < 4; i++)
480 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
481 for (i = 0; i < 4; i++)
482 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
483 datalen += 8;
485 send_on_vif(&uvifs[gt->gt_route->rt_parent], dst, DVMRP_GRAFT, datalen);
486 IF_DEBUG(DEBUG_PRUNE)
487 log(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
488 RT_FMT(gt->gt_route, s1), inet_fmt(gt->gt_mcastgrp, s2),
489 inet_fmt(gt->gt_route->rt_gateway, s3), gt->gt_route->rt_parent);
493 * Send an ack that a graft was received
495 static void
496 send_graft_ack(u_int32 src, u_int32 dst, u_int32 origin, u_int32 grp,
497 vifi_t vifi)
499 char *p;
500 int i;
501 int datalen;
503 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
504 datalen = 0;
506 for (i = 0; i < 4; i++)
507 *p++ = ((char *)&(origin))[i];
508 for (i = 0; i < 4; i++)
509 *p++ = ((char *)&(grp))[i];
510 datalen += 8;
512 if (vifi == NO_VIF)
513 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
514 htonl(MROUTED_LEVEL), datalen);
515 else {
516 #if 0
517 if (uvifs[vifi].uv_flags & VIFF_TUNNEL)
518 dst = dvmrp_group; /* XXX */
519 #endif
520 send_on_vif(&uvifs[vifi], dst, DVMRP_GRAFT_ACK, datalen);
523 IF_DEBUG(DEBUG_PRUNE)
524 if (vifi == NO_VIF)
525 log(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
526 inet_fmt(origin, s1), inet_fmt(grp, s2), inet_fmt(dst, s3));
527 else
528 log(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s on vif %d",
529 inet_fmt(origin, s1), inet_fmt(grp, s2), inet_fmt(dst, s3), vifi);
533 * Update the kernel cache with all the routes hanging off the group entry
535 static void
536 update_kernel(struct gtable *g)
538 struct stable *st;
540 for (st = g->gt_srctbl; st; st = st->st_next)
541 if (st->st_ctime != 0)
542 k_add_rg(st->st_origin, g);
545 /****************************************************************************
546 Functions that are used externally
547 ****************************************************************************/
549 #ifdef SNMP
550 #include <sys/types.h>
551 #include "snmp.h"
554 * Find a specific group entry in the group table
556 struct gtable *
557 find_grp(u_int32 grp)
559 struct gtable *gt;
561 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
562 if (ntohl(grp) < ntohl(gt->gt_mcastgrp))
563 break;
564 if (gt->gt_mcastgrp == grp)
565 return gt;
567 return NULL;
571 * Given a group entry and source, find the corresponding source table
572 * entry
574 struct stable *
575 find_grp_src(struct gtable *gt, u_int32 src)
577 struct stable *st;
578 u_long grp = gt->gt_mcastgrp;
579 struct gtable *gtcurr;
581 for (gtcurr = gt; gtcurr->gt_mcastgrp == grp; gtcurr = gtcurr->gt_gnext) {
582 for (st = gtcurr->gt_srctbl; st; st = st->st_next)
583 if (st->st_origin == src)
584 return st;
586 return NULL;
590 * Find next entry > specification
593 next_grp_src_mask(struct gtable **gtpp, /* ordered by group */
594 struct stable **stpp, /* ordered by source */
595 u_int32 grp, u_int32 src, u_int32 mask)
597 struct gtable *gt, *gbest = NULL;
598 struct stable *st, *sbest = NULL;
600 /* Find first group entry >= grp spec */
601 (*gtpp) = kernel_table;
602 while ((*gtpp) && ntohl((*gtpp)->gt_mcastgrp) < ntohl(grp))
603 (*gtpp)=(*gtpp)->gt_gnext;
604 if (!(*gtpp))
605 return 0; /* no more groups */
607 for (gt = kernel_table; gt; gt=gt->gt_gnext) {
608 /* Since grps are ordered, we can stop when group changes from gbest */
609 if (gbest && gbest->gt_mcastgrp != gt->gt_mcastgrp)
610 break;
611 for (st = gt->gt_srctbl; st; st=st->st_next) {
613 /* Among those entries > spec, find "lowest" one */
614 if (((ntohl(gt->gt_mcastgrp)> ntohl(grp))
615 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
616 && ntohl(st->st_origin)> ntohl(src))
617 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
618 && ntohl(st->st_origin)==src && 0xFFFFFFFF>ntohl(mask)))
619 && (!gbest
620 || (ntohl(gt->gt_mcastgrp)< ntohl(gbest->gt_mcastgrp))
621 || (ntohl(gt->gt_mcastgrp)==ntohl(gbest->gt_mcastgrp)
622 && ntohl(st->st_origin)< ntohl(sbest->st_origin)))) {
623 gbest = gt;
624 sbest = st;
628 (*gtpp) = gbest;
629 (*stpp) = sbest;
630 return (*gtpp)!=0;
634 * Ensure that sg contains current information for the given group,source.
635 * This is fetched from the kernel as a unit so that counts for the entry
636 * are consistent, i.e. packet and byte counts for the same entry are
637 * read at the same time.
639 void
640 refresh_sg(struct sioc_sg_req *sg, struct gtable *gt, struct stable *st)
642 static int lastq = -1;
644 if (quantum != lastq || sg->src.s_addr!=st->st_origin
645 || sg->grp.s_addr!=gt->gt_mcastgrp) {
646 lastq = quantum;
647 sg->src.s_addr = st->st_origin;
648 sg->grp.s_addr = gt->gt_mcastgrp;
649 ioctl(udp_socket, SIOCGETSGCNT, (char *)sg);
654 * Given a routing table entry, and a vifi, find the next entry
655 * equal to or greater than those
658 next_child(struct gtable **gtpp, struct stable **stpp,
659 u_int32 grp, u_int32 src, u_int32 mask,
660 vifi_t vifi) /* vif at which to start looking */
663 /* Get (G,S,M) entry */
664 if (mask!=0xFFFFFFFF
665 || !((*gtpp) = find_grp(grp))
666 || !((*stpp) = find_grp_src((*gtpp),src)))
667 if (!next_grp_src_mask(gtpp, stpp, grp, src, mask))
668 return 0;
670 /* Continue until we get one with a valid next vif */
671 do {
672 for (; (*gtpp)->gt_route->rt_children && *vifi<numvifs; (*vifi)++)
673 if (VIFM_ISSET(*vifi, (*gtpp)->gt_route->rt_children))
674 return 1;
675 *vifi = 0;
676 } while (next_grp_src_mask(gtpp, stpp, (*gtpp)->gt_mcastgrp,
677 (*stpp)->st_origin, 0xFFFFFFFF) );
679 return 0;
681 #endif /* SNMP */
684 * Initialize the kernel table structure
686 void
687 init_ktable(void)
689 kernel_table = NULL;
690 kernel_no_route = NULL;
691 kroutes = 0;
695 * Add a new table entry for (origin, mcastgrp)
697 void
698 add_table_entry(u_int32 origin, u_int32 mcastgrp)
700 struct rtentry *r;
701 struct gtable *gt,**gtnp,*prev_gt;
702 struct stable *st,**stnp;
705 * Since we have to enable mrouting to get the version number,
706 * some cache creation requests can sneak through. Ignore them
707 * since we're not going to do useful stuff until we've performed
708 * final initialization.
710 if (!did_final_init)
711 return;
713 #ifdef DEBUG_MFC
714 md_log(MD_MISS, origin, mcastgrp);
715 #endif
717 r = determine_route(origin);
718 prev_gt = NULL;
719 if (r == NULL) {
721 * Look for it on the no_route table; if it is found then
722 * it will be detected as a duplicate below.
724 for (gt = kernel_no_route; gt; gt = gt->gt_next)
725 if (mcastgrp == gt->gt_mcastgrp &&
726 gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
727 break;
728 gtnp = &kernel_no_route;
729 } else {
730 gtnp = &r->rt_groups;
731 while ((gt = *gtnp) != NULL) {
732 if (gt->gt_mcastgrp >= mcastgrp)
733 break;
734 gtnp = &gt->gt_next;
735 prev_gt = gt;
739 if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
740 gt = (struct gtable *)malloc(sizeof(struct gtable));
741 if (gt == NULL)
742 log(LOG_ERR, 0, "ran out of memory");
744 gt->gt_mcastgrp = mcastgrp;
745 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
746 time(&gt->gt_ctime);
747 gt->gt_prsent_timer = 0;
748 gt->gt_grftsnt = 0;
749 gt->gt_srctbl = NULL;
750 gt->gt_pruntbl = NULL;
751 gt->gt_route = r;
752 gt->gt_rexmit_timer = 0;
753 NBRM_CLRALL(gt->gt_prunes);
754 gt->gt_prune_rexmit = PRUNE_REXMIT_VAL;
755 #ifdef RSRR
756 gt->gt_rsrr_cache = NULL;
757 #endif
759 /* Calculate forwarding vifs */
760 determine_forwvifs(gt);
762 /* update ttls */
763 prun_add_ttls(gt);
765 gt->gt_next = *gtnp;
766 *gtnp = gt;
767 if (gt->gt_next)
768 gt->gt_next->gt_prev = gt;
769 gt->gt_prev = prev_gt;
771 if (r) {
772 if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
773 struct gtable *g;
775 g = gtp ? gtp->gt_gnext : kernel_table;
776 log(LOG_WARNING, 0, "Entry for (%s %s) (rt:%x) exists (rt:%x)",
777 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2),
778 r, g->gt_route);
779 } else {
780 if (gtp) {
781 gt->gt_gnext = gtp->gt_gnext;
782 gt->gt_gprev = gtp;
783 gtp->gt_gnext = gt;
784 } else {
785 gt->gt_gnext = kernel_table;
786 gt->gt_gprev = NULL;
787 kernel_table = gt;
789 if (gt->gt_gnext)
790 gt->gt_gnext->gt_gprev = gt;
792 } else {
793 gt->gt_gnext = gt->gt_gprev = NULL;
797 stnp = &gt->gt_srctbl;
798 while ((st = *stnp) != NULL) {
799 if (ntohl(st->st_origin) >= ntohl(origin))
800 break;
801 stnp = &st->st_next;
804 if (st == NULL || st->st_origin != origin) {
805 st = (struct stable *)malloc(sizeof(struct stable));
806 if (st == NULL)
807 log(LOG_ERR, 0, "ran out of memory");
809 st->st_origin = origin;
810 st->st_pktcnt = 0;
811 st->st_savpkt = 0;
812 time(&st->st_ctime);
813 st->st_next = *stnp;
814 *stnp = st;
815 } else {
816 if (st->st_ctime == 0) {
817 /* An old source which we're keeping around for statistics */
818 time(&st->st_ctime);
819 } else {
820 #ifdef DEBUG_MFC
821 md_log(MD_DUPE, origin, mcastgrp);
822 #endif
823 /* Ignore kernel->mrouted retransmissions */
824 if (time(0) - st->st_ctime > 5)
825 log(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
826 inet_fmt(origin, s1), inet_fmt(mcastgrp, s2));
827 k_add_rg(origin, gt);
828 return;
832 kroutes++;
833 k_add_rg(origin, gt);
835 IF_DEBUG(DEBUG_CACHE)
836 log(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
837 inet_fmt(origin, s1),
838 inet_fmt(mcastgrp, s2),
839 gt->gt_grpmems, r ? r->rt_parent : -1);
842 * If there are no downstream routers that want traffic for
843 * this group, send (or retransmit) a prune upstream.
845 if (VIFM_ISEMPTY(gt->gt_grpmems))
846 send_prune(gt);
850 * A router has gone down. Remove prune state pertinent to that router.
852 void
853 reset_neighbor_state(vifi_t vifi, u_int32 addr)
855 struct rtentry *r;
856 struct gtable *g;
857 struct ptable *pt, **ptnp;
858 struct stable *st;
860 for (g = kernel_table; g; g = g->gt_gnext) {
861 r = g->gt_route;
864 * If neighbor was the parent, remove the prune sent state
865 * and all of the source cache info so that prunes get
866 * regenerated.
868 if (vifi == r->rt_parent) {
869 if (addr == r->rt_gateway) {
870 IF_DEBUG(DEBUG_PEER)
871 log(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
872 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2));
874 g->gt_prsent_timer = 0;
875 g->gt_grftsnt = 0;
876 while ((st = g->gt_srctbl) != NULL) {
877 g->gt_srctbl = st->st_next;
878 if (st->st_ctime != 0) {
879 k_del_rg(st->st_origin, g);
880 kroutes--;
882 free(st);
885 } else {
887 * Remove any prunes that this router has sent us.
889 ptnp = &g->gt_pruntbl;
890 while ((pt = *ptnp) != NULL) {
891 if (pt->pt_vifi == vifi && pt->pt_router == addr) {
892 NBRM_CLR(pt->pt_index, g->gt_prunes);
893 *ptnp = pt->pt_next;
894 free(pt);
895 } else
896 ptnp = &pt->pt_next;
900 * And see if we want to forward again.
902 if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
903 GET_MEMBERSHIP(g, vifi);
904 APPLY_SCOPE(g);
905 prun_add_ttls(g);
907 /* Update kernel state */
908 update_kernel(g);
909 #ifdef RSRR
910 /* Send route change notification to reservation protocol. */
911 rsrr_cache_send(g,1);
912 #endif /* RSRR */
915 * If removing this prune causes us to start forwarding
916 * (e.g. the neighbor rebooted), and we sent a prune upstream,
917 * send a graft to cancel the prune.
919 if (!VIFM_ISEMPTY(g->gt_grpmems) && g->gt_prsent_timer)
920 send_graft(g);
922 IF_DEBUG(DEBUG_PEER)
923 log(LOG_DEBUG, 0, "reset neighbor state (%s %s) gm:%x",
924 RT_FMT(r, s1),
925 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
932 * Delete table entry from the kernel
933 * del_flag determines how many entries to delete
935 void
936 del_table_entry(struct rtentry *r, u_int32 mcastgrp, u_int del_flag)
938 struct gtable *g, *prev_g;
939 struct stable *st, *prev_st;
940 struct ptable *pt, *prev_pt;
942 if (del_flag == DEL_ALL_ROUTES) {
943 g = r->rt_groups;
944 while (g) {
945 IF_DEBUG(DEBUG_CACHE)
946 log(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
947 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2));
948 st = g->gt_srctbl;
949 while (st) {
950 if (st->st_ctime != 0) {
951 if (k_del_rg(st->st_origin, g) < 0) {
952 log(LOG_WARNING, errno,
953 "del_table_entry trying to delete (%s, %s)",
954 inet_fmt(st->st_origin, s1),
955 inet_fmt(g->gt_mcastgrp, s2));
957 kroutes--;
959 prev_st = st;
960 st = st->st_next;
961 free(prev_st);
963 g->gt_srctbl = NULL;
965 pt = g->gt_pruntbl;
966 while (pt) {
967 prev_pt = pt;
968 pt = pt->pt_next;
969 free(prev_pt);
971 g->gt_pruntbl = NULL;
973 if (g->gt_gnext)
974 g->gt_gnext->gt_gprev = g->gt_gprev;
975 if (g->gt_gprev)
976 g->gt_gprev->gt_gnext = g->gt_gnext;
977 else
978 kernel_table = g->gt_gnext;
980 #ifdef RSRR
981 /* Send route change notification to reservation protocol. */
982 rsrr_cache_send(g,0);
983 rsrr_cache_clean(g);
984 #endif /* RSRR */
985 if (g->gt_rexmit_timer)
986 timer_clearTimer(g->gt_rexmit_timer);
988 prev_g = g;
989 g = g->gt_next;
990 free(prev_g);
992 r->rt_groups = NULL;
996 * Dummy routine - someday this may be needed, so it is just there
998 if (del_flag == DEL_RTE_GROUP) {
999 prev_g = (struct gtable *)&r->rt_groups;
1000 for (g = r->rt_groups; g; g = g->gt_next) {
1001 if (g->gt_mcastgrp == mcastgrp) {
1002 IF_DEBUG(DEBUG_CACHE)
1003 log(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
1004 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2));
1005 st = g->gt_srctbl;
1006 while (st) {
1007 if (st->st_ctime != 0) {
1008 if (k_del_rg(st->st_origin, g) < 0) {
1009 log(LOG_WARNING, errno,
1010 "del_table_entry trying to delete (%s, %s)",
1011 inet_fmt(st->st_origin, s1),
1012 inet_fmt(g->gt_mcastgrp, s2));
1014 kroutes--;
1016 prev_st = st;
1017 st = st->st_next;
1018 free(prev_st);
1020 g->gt_srctbl = NULL;
1022 pt = g->gt_pruntbl;
1023 while (pt) {
1024 prev_pt = pt;
1025 pt = pt->pt_next;
1026 free(prev_pt);
1028 g->gt_pruntbl = NULL;
1030 if (g->gt_gnext)
1031 g->gt_gnext->gt_gprev = g->gt_gprev;
1032 if (g->gt_gprev)
1033 g->gt_gprev->gt_gnext = g->gt_gnext;
1034 else
1035 kernel_table = g->gt_gnext;
1037 if (prev_g != (struct gtable *)&r->rt_groups)
1038 g->gt_next->gt_prev = prev_g;
1039 else
1040 g->gt_next->gt_prev = NULL;
1041 prev_g->gt_next = g->gt_next;
1043 if (g->gt_rexmit_timer)
1044 timer_clearTimer(g->gt_rexmit_timer);
1045 #ifdef RSRR
1046 /* Send route change notification to reservation protocol. */
1047 rsrr_cache_send(g,0);
1048 rsrr_cache_clean(g);
1049 #endif /* RSRR */
1050 free(g);
1051 g = prev_g;
1052 } else {
1053 prev_g = g;
1060 * update kernel table entry when a route entry changes
1062 void
1063 update_table_entry(struct rtentry *r, u_int32 old_parent_gw)
1065 struct gtable *g;
1066 struct ptable *pt, **ptnp;
1068 for (g = r->rt_groups; g; g = g->gt_next) {
1069 ptnp = &g->gt_pruntbl;
1071 * Delete prune entries from non-children, or non-subordinates.
1073 while ((pt = *ptnp)) {
1074 if (!VIFM_ISSET(pt->pt_vifi, r->rt_children) ||
1075 !NBRM_ISSET(pt->pt_index, r->rt_subordinates)) {
1077 IF_DEBUG(DEBUG_PRUNE)
1078 log(LOG_DEBUG, 0, "update_table_entry deleting prune for (%s %s) from %s on vif %d -%s%s",
1079 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2),
1080 inet_fmt(pt->pt_router, s3), pt->pt_vifi,
1081 VIFM_ISSET(pt->pt_vifi, r->rt_children) ? "" : " not a child",
1082 NBRM_ISSET(pt->pt_index, r->rt_subordinates) ? "" : " not a subordinate");
1084 if (!NBRM_ISSET(pt->pt_index, g->gt_prunes)) {
1085 log(LOG_WARNING, 0,
1086 "gt_prunes lost track of (%s %s) from %s on vif %d",
1087 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2),
1088 inet_fmt(pt->pt_router, s3), pt->pt_vifi);
1091 NBRM_CLR(pt->pt_index, g->gt_prunes);
1092 *ptnp = pt->pt_next;
1093 free(pt);
1094 continue;
1096 ptnp = &((*ptnp)->pt_next);
1099 IF_DEBUG(DEBUG_CACHE)
1100 log(LOG_DEBUG, 0, "updating cache entries (%s %s) old gm:%x",
1101 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2),
1102 g->gt_grpmems);
1105 * Forget about a prune or graft that we sent previously if we
1106 * have a new parent router (since the new parent router will
1107 * know nothing about what I sent to the previous parent). The
1108 * old parent will forget any prune state it is keeping for us.
1110 if (old_parent_gw != r->rt_gateway) {
1111 g->gt_prsent_timer = 0;
1112 g->gt_grftsnt = 0;
1115 /* Recalculate membership */
1116 determine_forwvifs(g);
1117 /* send a prune or graft if needed. */
1118 send_prune_or_graft(g);
1120 IF_DEBUG(DEBUG_CACHE)
1121 log(LOG_DEBUG, 0, "updating cache entries (%s %s) new gm:%x",
1122 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2),
1123 g->gt_grpmems);
1125 /* update ttls and add entry into kernel */
1126 prun_add_ttls(g);
1127 update_kernel(g);
1128 #ifdef RSRR
1129 /* Send route change notification to reservation protocol. */
1130 rsrr_cache_send(g,1);
1131 #endif /* RSRR */
1136 * set the forwarding flag for all mcastgrps on this vifi
1138 void
1139 update_lclgrp(vifi_t vifi, u_int32 mcastgrp)
1141 struct rtentry *r;
1142 struct gtable *g;
1144 IF_DEBUG(DEBUG_MEMBER)
1145 log(LOG_DEBUG, 0, "group %s joined on vif %d",
1146 inet_fmt(mcastgrp, s1), vifi);
1148 for (g = kernel_table; g; g = g->gt_gnext) {
1149 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1150 break;
1152 r = g->gt_route;
1153 if (g->gt_mcastgrp == mcastgrp &&
1154 VIFM_ISSET(vifi, r->rt_children)) {
1156 VIFM_SET(vifi, g->gt_grpmems);
1157 APPLY_SCOPE(g);
1158 if (VIFM_ISEMPTY(g->gt_grpmems))
1159 continue;
1161 prun_add_ttls(g);
1162 IF_DEBUG(DEBUG_CACHE)
1163 log(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
1164 RT_FMT(r, s1),
1165 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1167 update_kernel(g);
1168 #ifdef RSRR
1169 /* Send route change notification to reservation protocol. */
1170 rsrr_cache_send(g,1);
1171 #endif /* RSRR */
1177 * reset forwarding flag for all mcastgrps on this vifi
1179 void
1180 delete_lclgrp(vifi_t vifi, u_int32 mcastgrp)
1182 struct gtable *g;
1184 IF_DEBUG(DEBUG_MEMBER)
1185 log(LOG_DEBUG, 0, "group %s left on vif %d",
1186 inet_fmt(mcastgrp, s1), vifi);
1188 for (g = kernel_table; g; g = g->gt_gnext) {
1189 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1190 break;
1192 if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, g->gt_grpmems)) {
1193 if (g->gt_route == NULL ||
1194 SUBS_ARE_PRUNED(g->gt_route->rt_subordinates,
1195 uvifs[vifi].uv_nbrmap, g->gt_prunes)) {
1196 VIFM_CLR(vifi, g->gt_grpmems);
1197 IF_DEBUG(DEBUG_CACHE)
1198 log(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
1199 RT_FMT(g->gt_route, s1),
1200 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1202 prun_add_ttls(g);
1203 update_kernel(g);
1204 #ifdef RSRR
1205 /* Send route change notification to reservation protocol. */
1206 rsrr_cache_send(g,1);
1207 #endif /* RSRR */
1210 * If there are no more members of this particular group,
1211 * send prune upstream
1213 if (VIFM_ISEMPTY(g->gt_grpmems) && g->gt_route->rt_gateway)
1214 send_prune(g);
1221 * Takes the received prune message and parses it to
1222 * determine the (src, grp) pair to be pruned.
1224 * Then adds the pruning router to the (src, grp) entry.
1226 * Determines whether further packets have to be sent down that vif.
1228 * Determines whether a corresponding prune message has to be generated.
1230 void
1231 accept_prune(u_int32 src, u_int32 dst, char *p, int datalen)
1233 u_int32 prun_src;
1234 u_int32 prun_grp;
1235 u_int32 prun_tmr;
1236 vifi_t vifi;
1237 int i;
1238 struct rtentry *r;
1239 struct gtable *g;
1240 struct ptable *pt;
1242 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1243 log(LOG_INFO, 0,
1244 "ignoring prune report from non-neighbor %s",
1245 inet_fmt(src, s1));
1246 return;
1249 /* Check if enough data is present */
1250 if (datalen < 12)
1252 log(LOG_WARNING, 0,
1253 "non-decipherable prune from %s",
1254 inet_fmt(src, s1));
1255 return;
1258 for (i = 0; i< 4; i++)
1259 ((char *)&prun_src)[i] = *p++;
1260 for (i = 0; i< 4; i++)
1261 ((char *)&prun_grp)[i] = *p++;
1262 for (i = 0; i< 4; i++)
1263 ((char *)&prun_tmr)[i] = *p++;
1264 prun_tmr = ntohl(prun_tmr);
1266 if (prun_tmr <= MIN_PRUNE_LIFE) {
1267 IF_DEBUG(DEBUG_PRUNE)
1268 log(LOG_DEBUG, 0, "ignoring prune from %s on vif %d for (%s %s)/%d because its lifetime is too short",
1269 inet_fmt(src, s1), vifi,
1270 inet_fmt(prun_src, s2), inet_fmt(prun_grp, s3), prun_tmr);
1271 return;
1274 IF_DEBUG(DEBUG_PRUNE)
1275 log(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
1276 inet_fmt(src, s1), vifi,
1277 inet_fmt(prun_src, s2), inet_fmt(prun_grp, s3), prun_tmr);
1280 * Find the subnet for the prune
1282 if (find_src_grp(prun_src, 0, prun_grp)) {
1283 g = gtp ? gtp->gt_gnext : kernel_table;
1284 r = g->gt_route;
1286 IF_DEBUG(DEBUG_PRUNE)
1287 log(LOG_DEBUG, 0, "found grp state, (%s %s), metric is %d, children are %x, subords are %08x%08x",
1288 RT_FMT(r, s1), inet_fmt(g->gt_mcastgrp, s2), r->rt_metric,
1289 r->rt_children, r->rt_subordinates.hi, r->rt_subordinates.lo);
1290 if (!VIFM_ISSET(vifi, r->rt_children)) {
1291 IF_DEBUG(DEBUG_PRUNE)
1292 log(LOG_WARNING, 0, "prune received from non-child %s for (%s %s) (dominant on vif %d is %s)",
1293 inet_fmt(src, s1), inet_fmt(prun_src, s2),
1294 inet_fmt(prun_grp, s3), vifi,
1295 inet_fmt(r->rt_dominants[vifi], s4));
1296 #ifdef RINGBUFFER
1297 printringbuf();
1298 #endif
1299 return;
1301 if (VIFM_ISSET(vifi, g->gt_scope)) {
1302 log(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
1303 inet_fmt(src, s1), inet_fmt(prun_src, s2),
1304 inet_fmt(prun_grp, s3));
1305 return;
1307 if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
1308 IF_DEBUG(DEBUG_PRUNE)
1309 log(LOG_DEBUG, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
1310 "duplicate prune received on vif",
1311 vifi, inet_fmt(src, s1), inet_fmt(prun_src, s2),
1312 inet_fmt(prun_grp, s3), prun_tmr,
1313 "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
1314 pt->pt_timer = prun_tmr;
1315 } else {
1316 struct listaddr *n = neighbor_info(vifi, src);
1318 if (!n) {
1319 log(LOG_WARNING, 0, "Prune from non-neighbor %s on vif %d!?",
1320 inet_fmt(src, s1), vifi);
1321 return;
1324 /* allocate space for the prune structure */
1325 pt = (struct ptable *)(malloc(sizeof(struct ptable)));
1326 if (pt == NULL)
1327 log(LOG_ERR, 0, "pt: ran out of memory");
1329 pt->pt_vifi = vifi;
1330 pt->pt_router = src;
1331 pt->pt_timer = prun_tmr;
1333 pt->pt_next = g->gt_pruntbl;
1334 g->gt_pruntbl = pt;
1336 if (n) {
1337 pt->pt_index = n->al_index;
1338 NBRM_SET(n->al_index, g->gt_prunes);
1343 * check if any more packets need to be sent on the
1344 * vif which sent this message
1346 if (SUBS_ARE_PRUNED(r->rt_subordinates,
1347 uvifs[vifi].uv_nbrmap, g->gt_prunes) &&
1348 !grplst_mem(vifi, prun_grp)) {
1349 nbrbitmap_t tmp;
1351 VIFM_CLR(vifi, g->gt_grpmems);
1352 IF_DEBUG(DEBUG_PRUNE)
1353 log(LOG_DEBUG, 0, "vifnbrs=0x%08x%08x, subord=0x%08x%08x prunes=0x%08x%08x",
1354 uvifs[vifi].uv_nbrmap.hi,uvifs[vifi].uv_nbrmap.lo,
1355 r->rt_subordinates.hi, r->rt_subordinates.lo,
1356 g->gt_prunes.hi, g->gt_prunes.lo);
1357 /* XXX debugging */
1358 NBRM_COPY(r->rt_subordinates, tmp);
1359 NBRM_MASK(tmp, uvifs[vifi].uv_nbrmap);
1360 if (!NBRM_ISSETALLMASK(g->gt_prunes, tmp))
1361 log(LOG_WARNING, 0, "subordinate error");
1362 /* XXX end debugging */
1363 IF_DEBUG(DEBUG_PRUNE|DEBUG_CACHE)
1364 log(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
1365 RT_FMT(r, s1),
1366 inet_fmt(g->gt_mcastgrp, s2), vifi, g->gt_grpmems);
1368 prun_add_ttls(g);
1369 update_kernel(g);
1370 #ifdef RSRR
1371 /* Send route change notification to reservation protocol. */
1372 rsrr_cache_send(g,1);
1373 #endif /* RSRR */
1377 * Check whether all the child routers have expressed no interest
1378 * in this group and whether the group has no members on this
1379 * router's interfaces.
1380 * If so, send a prune message upstream.
1382 if (VIFM_ISEMPTY(g->gt_grpmems) && r->rt_gateway) {
1383 send_prune(g);
1385 } else {
1387 * There is no kernel entry for this group. Therefore, we can
1388 * simply ignore the prune, as we are not forwarding this traffic
1389 * downstream.
1391 IF_DEBUG(DEBUG_PRUNE|DEBUG_CACHE)
1392 log(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
1393 "prune message received with no kernel entry for",
1394 inet_fmt(prun_src, s1), inet_fmt(prun_grp, s2),
1395 prun_tmr, inet_fmt(src, s3));
1396 return;
1401 * Checks whether this mcastgrp is present in the kernel table.
1402 * If so, and if a prune was sent, it sends a graft upstream.
1404 void
1405 chkgrp_graft(vifi_t vifi, u_int32 mcastgrp)
1407 struct rtentry *r;
1408 struct gtable *g;
1410 for (g = kernel_table; g; g = g->gt_gnext) {
1411 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1412 break;
1414 r = g->gt_route;
1415 if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1416 if (g->gt_prsent_timer) {
1417 VIFM_SET(vifi, g->gt_grpmems);
1420 * If the vif that was joined is a scoped vif,
1421 * ignore it; don't graft back.
1423 APPLY_SCOPE(g);
1424 if (VIFM_ISEMPTY(g->gt_grpmems))
1425 continue;
1427 /* send graft upwards */
1428 send_graft(g);
1430 /* update cache timer*/
1431 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1433 IF_DEBUG(DEBUG_PRUNE|DEBUG_CACHE)
1434 log(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1435 RT_FMT(r, s1),
1436 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1438 prun_add_ttls(g);
1439 update_kernel(g);
1440 #ifdef RSRR
1441 /* Send route change notification to reservation protocol. */
1442 rsrr_cache_send(g,1);
1443 #endif /* RSRR */
1448 /* Determine the multicast group and src, and check whether a kernel
1450 * table entry exists for them; if it does, then determine if a prune was sent
1451 * upstream.
1452 * If a prune was sent upstream, send a graft upstream and send an
1453 * ack downstream.
1455 * If no prune was sent upstream, set the forwarding bit
1456 * for this interface and send an ack downstream.
1458 * If no entry exists for this group, just send an ack downstream.
1460 void
1461 accept_graft(u_int32 src, u_int32 dst, char *p, int datalen)
1463 vifi_t vifi;
1464 u_int32 graft_src;
1465 u_int32 graft_grp;
1466 int i;
1467 struct rtentry *r;
1468 struct gtable *g;
1469 struct ptable *pt, **ptnp;
1471 if (datalen < 8) {
1472 log(LOG_WARNING, 0,
1473 "received non-decipherable graft from %s",
1474 inet_fmt(src, s1));
1475 return;
1478 for (i = 0; i< 4; i++)
1479 ((char *)&graft_src)[i] = *p++;
1480 for (i = 0; i< 4; i++)
1481 ((char *)&graft_grp)[i] = *p++;
1483 vifi = find_vif(src, dst);
1484 send_graft_ack(dst, src, graft_src, graft_grp, vifi);
1486 if (vifi == NO_VIF) {
1487 log(LOG_INFO, 0,
1488 "ignoring graft for (%s %s) from non-neighbor %s",
1489 inet_fmt(graft_src, s2), inet_fmt(graft_grp, s3),
1490 inet_fmt(src, s1));
1491 return;
1494 IF_DEBUG(DEBUG_PRUNE)
1495 log(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
1496 inet_fmt(src, s1), vifi,
1497 inet_fmt(graft_src, s2), inet_fmt(graft_grp, s3));
1500 * Find the subnet for the graft
1502 if (find_src_grp(graft_src, 0, graft_grp)) {
1503 g = gtp ? gtp->gt_gnext : kernel_table;
1504 r = g->gt_route;
1506 if (VIFM_ISSET(vifi, g->gt_scope)) {
1507 log(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
1508 inet_fmt(src, s1), inet_fmt(graft_src, s2),
1509 inet_fmt(graft_grp, s3));
1510 return;
1513 ptnp = &g->gt_pruntbl;
1514 while ((pt = *ptnp) != NULL) {
1515 if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
1516 NBRM_CLR(pt->pt_index, g->gt_prunes);
1517 *ptnp = pt->pt_next;
1518 free(pt);
1520 VIFM_SET(vifi, g->gt_grpmems);
1521 IF_DEBUG(DEBUG_PRUNE|DEBUG_CACHE)
1522 log(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
1523 RT_FMT(r, s1),
1524 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1526 prun_add_ttls(g);
1527 update_kernel(g);
1528 #ifdef RSRR
1529 /* Send route change notification to reservation protocol. */
1530 rsrr_cache_send(g,1);
1531 #endif /* RSRR */
1532 break;
1533 } else {
1534 ptnp = &pt->pt_next;
1538 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1540 if (g->gt_prsent_timer)
1541 /* send graft upwards */
1542 send_graft(g);
1543 } else {
1545 * We have no state for the source and group in question.
1546 * This is fine, since we know that we have no prune state, and
1547 * grafts are requests to remove prune state.
1549 IF_DEBUG(DEBUG_PRUNE)
1550 log(LOG_DEBUG, 0, "%s (%s %s) from %s",
1551 "graft received with no kernel entry for",
1552 inet_fmt(graft_src, s1), inet_fmt(graft_grp, s2),
1553 inet_fmt(src, s3));
1554 return;
1559 * First find out which group is involved,
1560 * then determine whether a graft was sent.
1561 * If no graft was sent, ignore the message.
1562 * If a graft was sent and the ack is from the right
1563 * source, clear the graft timer so that we don't
1564 * have to send a graft again.
1566 void
1567 accept_g_ack(u_int32 src, u_int32 dst, char *p, int datalen)
1569 struct gtable *g;
1570 vifi_t vifi;
1571 u_int32 grft_src;
1572 u_int32 grft_grp;
1573 int i;
1575 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1576 log(LOG_INFO, 0,
1577 "ignoring graft ack from non-neighbor %s",
1578 inet_fmt(src, s1));
1579 return;
1582 if (datalen < 0 || datalen > 8) {
1583 log(LOG_WARNING, 0,
1584 "received non-decipherable graft ack from %s",
1585 inet_fmt(src, s1));
1586 return;
1589 for (i = 0; i< 4; i++)
1590 ((char *)&grft_src)[i] = *p++;
1591 for (i = 0; i< 4; i++)
1592 ((char *)&grft_grp)[i] = *p++;
1594 IF_DEBUG(DEBUG_PRUNE)
1595 log(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1596 inet_fmt(src, s1), vifi,
1597 inet_fmt(grft_src, s2), inet_fmt(grft_grp, s3));
1600 * Find the subnet for the graft ack
1602 if (find_src_grp(grft_src, 0, grft_grp)) {
1603 g = gtp ? gtp->gt_gnext : kernel_table;
1604 g->gt_grftsnt = 0;
1605 } else {
1606 log(LOG_WARNING, 0, "%s (%s, %s) from %s",
1607 "rcvd graft ack with no kernel entry for",
1608 inet_fmt(grft_src, s1), inet_fmt(grft_grp, s2),
1609 inet_fmt(src, s3));
1610 #ifdef RINGBUFFER
1611 printringbuf();
1612 #endif
1613 return;
1619 * Free all prune entries and kernel routes.
1620 * Normally, this should inform the kernel that all of its routes
1621 * are going away, but this is only called by restart(), which is
1622 * about to call MRT_DONE, which does that anyway.
1624 void
1625 free_all_prunes(void)
1627 struct rtentry *r;
1628 struct gtable *g, *prev_g;
1629 struct stable *s, *prev_s;
1630 struct ptable *p, *prev_p;
1632 for (r = routing_table; r; r = r->rt_next) {
1633 g = r->rt_groups;
1634 while (g) {
1635 s = g->gt_srctbl;
1636 while (s) {
1637 prev_s = s;
1638 s = s->st_next;
1639 free(prev_s);
1642 p = g->gt_pruntbl;
1643 while (p) {
1644 prev_p = p;
1645 p = p->pt_next;
1646 free(prev_p);
1649 prev_g = g;
1650 g = g->gt_next;
1651 if (prev_g->gt_rexmit_timer)
1652 timer_clearTimer(prev_g->gt_rexmit_timer);
1653 free(prev_g);
1655 r->rt_groups = NULL;
1657 kernel_table = NULL;
1659 g = kernel_no_route;
1660 while (g) {
1661 if (g->gt_srctbl)
1662 free(g->gt_srctbl);
1664 prev_g = g;
1665 g = g->gt_next;
1666 if (prev_g->gt_rexmit_timer)
1667 timer_clearTimer(prev_g->gt_rexmit_timer);
1668 free(prev_g);
1670 kernel_no_route = NULL;
1674 * When a new route is created, search
1675 * a) The less-specific part of the routing table
1676 * b) The route-less kernel table
1677 * for sources that the new route might want to handle.
1679 * "Inheriting" these sources might be cleanest, but simply deleting
1680 * them is easier, and letting the kernel re-request them.
1682 void
1683 steal_sources(struct rtentry *rt)
1685 struct rtentry *rp;
1686 struct gtable *gt, **gtnp;
1687 struct stable *st, **stnp;
1689 for (rp = rt->rt_next; rp; rp = rp->rt_next) {
1690 if (rp->rt_groups == NULL)
1691 continue;
1692 if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
1693 IF_DEBUG(DEBUG_ROUTE)
1694 log(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
1695 RT_FMT(rt, s1), RT_FMT(rp, s2));
1696 for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
1697 stnp = &gt->gt_srctbl;
1698 while ((st = *stnp) != NULL) {
1699 if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
1700 IF_DEBUG(DEBUG_ROUTE)
1701 log(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1702 RT_FMT(rt, s1),
1703 inet_fmt(st->st_origin, s3),
1704 inet_fmt(gt->gt_mcastgrp, s4),
1705 RT_FMT(rp, s2));
1706 if (st->st_ctime != 0) {
1707 if (k_del_rg(st->st_origin, gt) < 0) {
1708 log(LOG_WARNING, errno, "%s (%s, %s)",
1709 "steal_sources trying to delete",
1710 inet_fmt(st->st_origin, s1),
1711 inet_fmt(gt->gt_mcastgrp, s2));
1713 kroutes--;
1715 *stnp = st->st_next;
1716 free(st);
1717 } else {
1718 stnp = &st->st_next;
1725 gtnp = &kernel_no_route;
1726 while ((gt = *gtnp) != NULL) {
1727 if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
1728 == rt->rt_origin)) {
1729 IF_DEBUG(DEBUG_ROUTE)
1730 log(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1731 RT_FMT(rt, s1),
1732 inet_fmt(gt->gt_srctbl->st_origin, s3),
1733 inet_fmt(gt->gt_mcastgrp, s4),
1734 "no_route table");
1735 if (gt->gt_srctbl->st_ctime != 0) {
1736 if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1737 log(LOG_WARNING, errno, "%s (%s %s)",
1738 "steal_sources trying to delete",
1739 inet_fmt(gt->gt_srctbl->st_origin, s1),
1740 inet_fmt(gt->gt_mcastgrp, s2));
1742 kroutes--;
1744 free(gt->gt_srctbl);
1745 *gtnp = gt->gt_next;
1746 if (gt->gt_next)
1747 gt->gt_next->gt_prev = gt->gt_prev;
1748 if (gt->gt_rexmit_timer)
1749 timer_clearTimer(gt->gt_rexmit_timer);
1750 free(gt);
1751 } else {
1752 gtnp = &gt->gt_next;
1758 * Advance the timers on all the cache entries.
1759 * If there are any entries whose timers have expired,
1760 * remove these entries from the kernel cache.
1762 void
1763 age_table_entry(void)
1765 struct rtentry *r;
1766 struct gtable *gt, **gtnptr;
1767 struct stable *st, **stnp;
1768 struct ptable *pt, **ptnp;
1769 struct sioc_sg_req sg_req;
1771 IF_DEBUG(DEBUG_PRUNE|DEBUG_CACHE)
1772 log(LOG_DEBUG, 0, "aging forwarding cache entries");
1774 gtnptr = &kernel_table;
1775 while ((gt = *gtnptr) != NULL) {
1776 vifi_t i; /* XXX Debugging */
1777 int fixit = 0; /* XXX Debugging */
1779 r = gt->gt_route;
1781 /* XXX Debugging... */
1782 for (i = 0; i < numvifs; i++) {
1784 * If we're not sending on this vif,
1785 * And this group isn't scoped on this vif,
1786 * And I'm the parent for this route on this vif,
1787 * And there are subordinates on this vif,
1788 * And all of the subordinates haven't pruned,
1789 * YELL LOUDLY
1790 * and remember to fix it up later
1792 if (!VIFM_ISSET(i, gt->gt_grpmems) &&
1793 !VIFM_ISSET(i, gt->gt_scope) &&
1794 VIFM_ISSET(i, r->rt_children) &&
1795 NBRM_ISSETMASK(uvifs[i].uv_nbrmap, r->rt_subordinates) &&
1796 !SUBS_ARE_PRUNED(r->rt_subordinates, uvifs[i].uv_nbrmap, gt->gt_prunes)) {
1797 log(LOG_WARNING, 0, "(%s %s) is blackholing on vif %d",
1798 RT_FMT(r, s1), inet_fmt(gt->gt_mcastgrp, s2), i);
1799 fixit = 1;
1802 if (fixit) {
1803 log(LOG_WARNING, 0, "fixing membership for (%s %s) gm:%x",
1804 RT_FMT(r, s1), inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems);
1805 determine_forwvifs(gt);
1806 send_prune_or_graft(gt);
1807 log(LOG_WARNING, 0, "fixed membership for (%s %s) gm:%x",
1808 RT_FMT(r, s1), inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems);
1809 #ifdef RINGBUFFER
1810 printringbuf();
1811 #endif
1813 /*DEBUG2*/
1814 /* If there are group members,
1815 * and there are recent sources,
1816 * and we have a route,
1817 * and it's not directly connected,
1818 * and we haven't sent a prune,
1819 * if there are any cache entries in the kernel
1820 * [if there aren't we're probably waiting to rexmit],
1821 * YELL LOUDLY
1822 * and send a prune
1824 if (VIFM_ISEMPTY(gt->gt_grpmems) && gt->gt_srctbl && r && r->rt_gateway && gt->gt_prsent_timer == 0) {
1825 for (st = gt->gt_srctbl; st; st = st->st_next)
1826 if (st->st_ctime != 0)
1827 break;
1828 if (st != NULL) {
1829 log(LOG_WARNING, 0, "grpmems for (%s %s) is empty but no prune state!", RT_FMT(r, s1), inet_fmt(gt->gt_mcastgrp, s2));
1830 send_prune_or_graft(gt);
1831 #ifdef RINGBUFFER
1832 printringbuf();
1833 #endif
1836 /* XXX ...Debugging */
1838 /* advance the timer for the kernel entry */
1839 gt->gt_timer -= TIMER_INTERVAL;
1841 /* decrement prune timer if need be */
1842 if (gt->gt_prsent_timer > 0) {
1843 gt->gt_prsent_timer -= TIMER_INTERVAL;
1844 if (gt->gt_prsent_timer <= 0) {
1845 IF_DEBUG(DEBUG_PRUNE)
1846 log(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
1847 RT_FMT(r, s1),
1848 inet_fmt(gt->gt_mcastgrp, s2));
1849 gt->gt_prsent_timer = -1;
1850 /* Reset the prune retransmission timer to its initial value */
1851 gt->gt_prune_rexmit = PRUNE_REXMIT_VAL;
1855 /* retransmit graft with exponential backoff */
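/*
 * gt_grftsnt counts aging passes since the graft was sent; the bit trick
 * below strips trailing zero bits, so y == 1 exactly when the counter
 * reaches a power of two.  The graft is therefore resent after 1, 2, 4,
 * 8, ... timer intervals until an ack clears it.
 */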
1856 if (gt->gt_grftsnt) {
1857 int y;
1859 y = ++gt->gt_grftsnt;
1860 while (y && !(y & 1))
1861 y >>= 1;
1862 if (y == 1)
1863 send_graft(gt);
1867 * Age prunes
1869 * If a prune expires, forward again on that vif.
1871 ptnp = &gt->gt_pruntbl;
1872 while ((pt = *ptnp) != NULL) {
1873 if ((pt->pt_timer -= TIMER_INTERVAL) <= 0) {
1874 IF_DEBUG(DEBUG_PRUNE)
1875 log(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
1876 RT_FMT(r, s1),
1877 inet_fmt(gt->gt_mcastgrp, s2),
1878 inet_fmt(pt->pt_router, s3),
1879 pt->pt_vifi);
1880 if (gt->gt_prsent_timer > 0) {
1881 log(LOG_WARNING, 0, "prune (%s %s) from %s on vif %d expires with %d left on prsent timer",
1882 RT_FMT(r, s1),
1883 inet_fmt(gt->gt_mcastgrp, s2),
1884 inet_fmt(pt->pt_router, s3),
1885 pt->pt_vifi, gt->gt_prsent_timer);
1886 /* Send a graft to heal the tree. */
1887 send_graft(gt);
1890 NBRM_CLR(pt->pt_index, gt->gt_prunes);
1891 expire_prune(pt->pt_vifi, gt);
1893 /* remove the router's prune entry and await new one */
1894 *ptnp = pt->pt_next;
1895 free(pt);
1896 } else {
1897 ptnp = &pt->pt_next;
1902 * If the cache entry has expired, delete source table entries for
1903 * silent sources. If there are no source entries left, and there
1904 * are no downstream prunes, then the entry is deleted.
1905 * Otherwise, the cache entry's timer is refreshed.
1907 if (gt->gt_timer <= 0) {
1908 IF_DEBUG(DEBUG_CACHE)
1909 log(LOG_DEBUG, 0, "(%s %s) timed out, checking for traffic",
1910 RT_FMT(gt->gt_route, s1),
1911 inet_fmt(gt->gt_mcastgrp, s2));
1912 /* Check for traffic before deleting source entries */
1913 sg_req.grp.s_addr = gt->gt_mcastgrp;
1914 stnp = &gt->gt_srctbl;
1915 while ((st = *stnp) != NULL) {
1917 * Source entries with no ctime are not actually in the
1918 * kernel; they have been removed by rexmit_prune() so
1919 * are safe to remove from the list at this point.
1921 if (st->st_ctime) {
1922 sg_req.src.s_addr = st->st_origin;
1923 if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
1924 log(LOG_WARNING, errno, "%s (%s %s)",
1925 "age_table_entry: SIOCGETSGCNT failing for",
1926 inet_fmt(st->st_origin, s1),
1927 inet_fmt(gt->gt_mcastgrp, s2));
1928 /* Make sure it gets deleted below */
1929 sg_req.pktcnt = st->st_pktcnt;
1931 } else {
1932 sg_req.pktcnt = st->st_pktcnt;
1934 if (sg_req.pktcnt == st->st_pktcnt) {
1935 *stnp = st->st_next;
1936 IF_DEBUG(DEBUG_CACHE)
1937 log(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
1938 inet_fmt(st->st_origin, s1),
1939 inet_fmt(gt->gt_mcastgrp, s2));
1940 if (st->st_ctime != 0) {
1941 if (k_del_rg(st->st_origin, gt) < 0) {
1942 log(LOG_WARNING, errno,
1943 "age_table_entry trying to delete (%s %s)",
1944 inet_fmt(st->st_origin, s1),
1945 inet_fmt(gt->gt_mcastgrp, s2));
1947 kroutes--;
1949 free(st);
1950 } else {
1951 st->st_pktcnt = sg_req.pktcnt;
1952 stnp = &st->st_next;
1957 * Retain the group entry if we have downstream prunes or if
1958 * there is at least one source in the list that still has
1959 * traffic, or if our upstream prune timer or graft
1960 * retransmission timer is running.
1962 if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
1963 gt->gt_prsent_timer > 0 || gt->gt_grftsnt > 0) {
1964 IF_DEBUG(DEBUG_CACHE)
1965 log(LOG_DEBUG, 0, "refresh lifetim of cache entry %s%s%s%s(%s, %s)",
1966 gt->gt_pruntbl ? "(dstrm prunes) " : "",
1967 gt->gt_srctbl ? "(trfc flow) " : "",
1968 gt->gt_prsent_timer > 0 ? "(upstrm prune) " : "",
1969 gt->gt_grftsnt > 0 ? "(grft rexmit) " : "",
1970 RT_FMT(r, s1),
1971 inet_fmt(gt->gt_mcastgrp, s2));
1972 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
1973 if (gt->gt_prsent_timer == -1) {
1975 * The upstream prune timed out. Remove any kernel
1976 * state.
1978 gt->gt_prsent_timer = 0;
1979 if (gt->gt_pruntbl) {
1980 log(LOG_WARNING, 0, "upstream prune for (%s %s) expires with downstream prunes active",
1981 RT_FMT(r, s1), inet_fmt(gt->gt_mcastgrp, s2));
1983 remove_sources(gt);
1985 gtnptr = &gt->gt_gnext;
1986 continue;
1989 IF_DEBUG(DEBUG_CACHE)
1990 log(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
1991 RT_FMT(r, s1),
1992 inet_fmt(gt->gt_mcastgrp, s2));
1994 if (gt->gt_prev)
1995 gt->gt_prev->gt_next = gt->gt_next;
1996 else
1997 gt->gt_route->rt_groups = gt->gt_next;
1998 if (gt->gt_next)
1999 gt->gt_next->gt_prev = gt->gt_prev;
2001 if (gt->gt_gprev) {
2002 gt->gt_gprev->gt_gnext = gt->gt_gnext;
2003 gtnptr = &gt->gt_gprev->gt_gnext;
2004 } else {
2005 kernel_table = gt->gt_gnext;
2006 gtnptr = &kernel_table;
2008 if (gt->gt_gnext)
2009 gt->gt_gnext->gt_gprev = gt->gt_gprev;
2011 #ifdef RSRR
2012 /* Send route change notification to reservation protocol. */
2013 rsrr_cache_send(gt,0);
2014 rsrr_cache_clean(gt);
2015 #endif /* RSRR */
2016 if (gt->gt_rexmit_timer)
2017 timer_clearTimer(gt->gt_rexmit_timer);
2019 free((char *)gt);
2020 } else {
2021 if (gt->gt_prsent_timer == -1) {
2023 * The upstream prune timed out. Remove any kernel
2024 * state.
2026 gt->gt_prsent_timer = 0;
2027 if (gt->gt_pruntbl) {
2028 log(LOG_WARNING, 0, "upstream prune for (%s %s) expires with downstream prunes active",
2029 RT_FMT(r, s1), inet_fmt(gt->gt_mcastgrp, s2));
2031 remove_sources(gt);
2033 gtnptr = &gt->gt_gnext;
2038 * When traversing the no_route table, the decision is much easier.
2039 * Just delete it if it has timed out.
2041 gtnptr = &kernel_no_route;
2042 while ((gt = *gtnptr) != NULL) {
2043 /* advance the timer for the kernel entry */
2044 gt->gt_timer -= TIMER_INTERVAL;
2046 if (gt->gt_timer < 0) {
2047 if (gt->gt_srctbl) {
2048 if (gt->gt_srctbl->st_ctime != 0) {
2049 if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
2050 log(LOG_WARNING, errno, "%s (%s %s)",
2051 "age_table_entry trying to delete no-route",
2052 inet_fmt(gt->gt_srctbl->st_origin, s1),
2053 inet_fmt(gt->gt_mcastgrp, s2));
2055 kroutes--;
2057 free(gt->gt_srctbl);
2059 *gtnptr = gt->gt_next;
2060 if (gt->gt_next)
2061 gt->gt_next->gt_prev = gt->gt_prev;
2063 if (gt->gt_rexmit_timer)
2064 timer_clearTimer(gt->gt_rexmit_timer);
2066 free((char *)gt);
2067 } else {
2068 gtnptr = &gt->gt_next;
2074 * Modify the kernel to forward packets when one or multiple prunes that
2075 * were received on the vif given by vifi, for the group given by gt,
2076 * have expired.
2078 static void
2079 expire_prune(vifi_t vifi, struct gtable *gt)
2082 * No need to send a graft: any prunes that we sent
2083 * will expire before any prunes that we have received.
2084 * However, in case we did make a mistake,
2085 * send a graft to compensate.
2087 if (gt->gt_prsent_timer >= MIN_PRUNE_LIFE) {
2088 IF_DEBUG(DEBUG_PRUNE)
2089 log(LOG_DEBUG, 0, "prune expired with %d left on %s",
2090 gt->gt_prsent_timer, "prsent_timer");
2091 gt->gt_prsent_timer = 0;
2092 send_graft(gt);
2095 /* modify the kernel entry to forward packets */
2096 if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
2097 struct rtentry *rt = gt->gt_route;
2098 VIFM_SET(vifi, gt->gt_grpmems);
2099 IF_DEBUG(DEBUG_CACHE)
2100 log(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
2101 RT_FMT(rt, s1),
2102 inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems, vifi);
2104 prun_add_ttls(gt);
2105 update_kernel(gt);
2106 #ifdef RSRR
2107 /* Send route change notification to reservation protocol. */
2108 rsrr_cache_send(gt,1);
2109 #endif /* RSRR */
2114 * Print the contents of the cache table on file 'fp2'.
2116 void
2117 dump_cache(FILE *fp2)
2119 struct rtentry *r;
2120 struct gtable *gt;
2121 struct stable *st;
2122 struct ptable *pt;
2123 vifi_t i;
2124 char c;
2125 time_t thyme;
2127 thyme = time(0);
2128 fprintf(fp2,
2129 "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
2130 " Origin Mcast-group CTmr Age Ptmr Rx IVif Forwvifs\n");
2131 fprintf(fp2,
2132 "<(prunesrc:vif[idx]/tmr) prunebitmap\n%s",
2133 ">Source Lifetime SavPkt Pkts Bytes RPFf\n");
2135 for (gt = kernel_no_route; gt; gt = gt->gt_next) {
2136 if (gt->gt_srctbl) {
2137 fprintf(fp2, " %-18s %-15s %-8s %-8s - -1 (no route)\n",
2138 inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff, s1),
2139 inet_fmt(gt->gt_mcastgrp, s2), scaletime(gt->gt_timer),
2140 scaletime(thyme - gt->gt_ctime));
2141 fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin, s1));
2145 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
2146 r = gt->gt_route;
2147 fprintf(fp2, " %-18s %-15s",
2148 RT_FMT(r, s1),
2149 inet_fmt(gt->gt_mcastgrp, s2));
2151 fprintf(fp2, " %-8s", scaletime(gt->gt_timer));
2153 fprintf(fp2, " %-8s %-8s ", scaletime(thyme - gt->gt_ctime),
2154 gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
2155 " -");
2157 if (gt->gt_prune_rexmit) {
2158 int i = gt->gt_prune_rexmit;
2159 int n = 0;
2161 while (i > PRUNE_REXMIT_VAL) {
2162 n++;
2163 i /= 2;
2165 if (n == 0 && gt->gt_prsent_timer == 0)
2166 fprintf(fp2, " -");
2167 else
2168 fprintf(fp2, "%2d", n);
2169 } else {
2170 fprintf(fp2, " -");
2173 fprintf(fp2, " %2u%c%c", r->rt_parent,
2174 gt->gt_prsent_timer ? 'P' :
2175 gt->gt_grftsnt ? 'G' : ' ',
2176 VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
2178 for (i = 0; i < numvifs; ++i) {
2179 if (VIFM_ISSET(i, gt->gt_grpmems))
2180 fprintf(fp2, " %u ", i);
2181 else if (VIFM_ISSET(i, r->rt_children) &&
2182 NBRM_ISSETMASK(uvifs[i].uv_nbrmap, r->rt_subordinates))
2183 fprintf(fp2, " %u%c", i,
2184 VIFM_ISSET(i, gt->gt_scope) ? 'b' :
2185 SUBS_ARE_PRUNED(r->rt_subordinates,
2186 uvifs[i].uv_nbrmap, gt->gt_prunes) ? 'p' : '!');
2188 fprintf(fp2, "\n");
2189 if (gt->gt_pruntbl) {
2190 fprintf(fp2, "<");
2191 c = '(';
2192 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
2193 fprintf(fp2, "%c%s:%d[%d]/%d", c, inet_fmt(pt->pt_router, s1),
2194 pt->pt_vifi, pt->pt_index, pt->pt_timer);
2195 c = ',';
2197 fprintf(fp2, ")");
2198 fprintf(fp2, " 0x%08lx%08lx\n",/*XXX*/
2199 gt->gt_prunes.hi, gt->gt_prunes.lo);
2201 for (st = gt->gt_srctbl; st; st = st->st_next) {
2202 fprintf(fp2, ">%-18s %-8s %6ld", inet_fmt(st->st_origin, s1),
2203 st->st_ctime ? scaletime(thyme - st->st_ctime) : "-",
2204 st->st_savpkt);
2205 if (st->st_ctime) {
2206 struct sioc_sg_req sg_req;
2208 sg_req.src.s_addr = st->st_origin;
2209 sg_req.grp.s_addr = gt->gt_mcastgrp;
2210 if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
2211 log(LOG_WARNING, errno, "SIOCGETSGCNT on (%s %s)",
2212 inet_fmt(st->st_origin, s1),
2213 inet_fmt(gt->gt_mcastgrp, s2));
2214 } else {
2215 fprintf(fp2, " %8ld %8ld %4ld", sg_req.pktcnt,
2216 sg_req.bytecnt, sg_req.wrong_if);
2219 fprintf(fp2, "\n");
2224 /*
2225 * Traceroute function which returns traceroute replies to the requesting
2226 * router. Also forwards the request to the upstream router.
2227 */
2228 void
2229 accept_mtrace(u_int32 src, u_int32 dst, u_int32 group, char *data,
2230 u_int no, /* promoted u_char */
2231 int datalen)
2232 {
2233 u_char type;
2234 struct rtentry *rt;
2235 struct gtable *gt;
2236 struct tr_query *qry;
2237 struct tr_resp *resp;
2238 int vifi;
2239 char *p;
2240 int rcount;
2241 int errcode = TR_NO_ERR;
2242 int resptype;
2243 struct timeval tp;
2244 struct sioc_vif_req v_req;
2245 struct sioc_sg_req sg_req;
2247 /* Remember qid across invocations */
2248 static u_int32 oqid = 0;
2250 /* timestamp the request/response */
2251 gettimeofday(&tp, 0);
2253 /*
2254 * Check if it is a query or a response
2255 */
2256 if (datalen == QLEN) {
2257 type = QUERY;
2258 IF_DEBUG(DEBUG_TRACE)
2259 log(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
2260 inet_fmt(src, s1), inet_fmt(dst, s2));
2262 else if ((datalen - QLEN) % RLEN == 0) {
2263 type = RESP;
2264 IF_DEBUG(DEBUG_TRACE)
2265 log(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
2266 inet_fmt(src, s1), inet_fmt(dst, s2));
2267 if (IN_MULTICAST(ntohl(dst))) {
2268 IF_DEBUG(DEBUG_TRACE)
2269 log(LOG_DEBUG, 0, "Dropping multicast response");
2270 return;
2273 else {
2274 log(LOG_WARNING, 0, "%s from %s to %s",
2275 "Non decipherable traceroute request received",
2276 inet_fmt(src, s1), inet_fmt(dst, s2));
2277 return;
2280 qry = (struct tr_query *)data;
2282 /*
2283 * if it is a packet with all reports filled, drop it
2284 */
2285 if ((rcount = (datalen - QLEN)/RLEN) == no) {
2286 IF_DEBUG(DEBUG_TRACE)
2287 log(LOG_DEBUG, 0, "packet with all reports filled in");
2288 return;
2291 IF_DEBUG(DEBUG_TRACE) {
2292 log(LOG_DEBUG, 0, "s: %s g: %s d: %s ", inet_fmt(qry->tr_src, s1),
2293 inet_fmt(group, s2), inet_fmt(qry->tr_dst, s3));
2294 log(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
2295 inet_fmt(qry->tr_raddr, s1));
2296 log(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);
2299 /* determine the routing table entry for this traceroute */
2300 rt = determine_route(qry->tr_src);
2301 IF_DEBUG(DEBUG_TRACE)
2302 if (rt) {
2303 log(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
2304 rt->rt_parent, inet_fmt(rt->rt_gateway, s1), rt->rt_metric);
2305 log(LOG_DEBUG, 0, "rt origin %s",
2306 RT_FMT(rt, s1));
2307 } else
2308 log(LOG_DEBUG, 0, "...no route");
2310 /*
2311 * Query type packet - check if a route entry exists.
2312 * Check whether the query destination is on a vif connected to me
2313 * and, if so, whether I should send the response back.
2314 */
2315 if (type == QUERY) {
2316 if (oqid == qry->tr_qid) {
2317 /*
2318 * If the multicast router is a member of the group being
2319 * queried, and the query is multicasted, then the router can
2320 * receive multiple copies of the same query. If we have already
2321 * replied to this traceroute, just ignore it this time.
2322 *
2323 * This is not a total solution, but since if this fails you
2324 * only get N copies, N <= the number of interfaces on the router,
2325 * it is not fatal.
2326 */
2327 IF_DEBUG(DEBUG_TRACE)
2328 log(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
2329 return;
2332 if (rt == NULL) {
2333 IF_DEBUG(DEBUG_TRACE)
2334 log(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
2335 inet_fmt(qry->tr_src, s1));
2336 if (IN_MULTICAST(ntohl(dst)))
2337 return;
2339 vifi = find_vif(qry->tr_dst, 0);
2341 if (vifi == NO_VIF) {
2342 /* The traceroute destination is not on one of my subnet vifs. */
2343 IF_DEBUG(DEBUG_TRACE)
2344 log(LOG_DEBUG, 0, "Destination %s not an interface",
2345 inet_fmt(qry->tr_dst, s1));
2346 if (IN_MULTICAST(ntohl(dst)))
2347 return;
2348 errcode = TR_WRONG_IF;
2349 } else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
2350 IF_DEBUG(DEBUG_TRACE)
2351 log(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
2352 inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
2353 if (IN_MULTICAST(ntohl(dst)))
2354 return;
2355 errcode = TR_WRONG_IF;
2358 else {
2359 /*
2360 * determine which interface the packet came in on
2361 * RESP packets travel hop-by-hop so this either traversed
2362 * a tunnel or came from a directly attached mrouter.
2363 */
2364 if ((vifi = find_vif(src, dst)) == NO_VIF) {
2365 IF_DEBUG(DEBUG_TRACE)
2366 log(LOG_DEBUG, 0, "Wrong interface for packet");
2367 errcode = TR_WRONG_IF;
2371 /* Now that we've decided to send a response, save the qid */
2372 oqid = qry->tr_qid;
2374 IF_DEBUG(DEBUG_TRACE)
2375 log(LOG_DEBUG, 0, "Sending traceroute response");
2377 /* copy the packet to the sending buffer */
2378 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
2380 bcopy(data, p, datalen);
2382 p += datalen;
2384 /*
2385 * If there is no room to insert our reply, coopt the previous hop
2386 * error indication to relay this fact.
2387 */
2388 if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
2389 resp = (struct tr_resp *)p - 1;
2390 resp->tr_rflags = TR_NO_SPACE;
2391 rt = NULL;
2392 goto sendit;
2395 /*
2396 * fill in initial response fields
2397 */
2398 resp = (struct tr_resp *)p;
2399 bzero(resp, sizeof(struct tr_resp));
2400 datalen += RLEN;
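/*
 * The query arrival time is encoded as a 32-bit NTP-style timestamp:
 * the high 16 bits are seconds since 1900 (JAN_1970 converts from the
 * Unix epoch) and the low 16 bits are fractional seconds, since
 * (usec << 10) / 15625 == usec * 65536 / 1000000.
 */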
2402 resp->tr_qarr = htonl(((tp.tv_sec + JAN_1970) << 16) +
2403 ((tp.tv_usec << 10) / 15625));
2405 resp->tr_rproto = PROTO_DVMRP;
2406 resp->tr_outaddr = (vifi == NO_VIF) ? dst : uvifs[vifi].uv_lcl_addr;
2407 resp->tr_fttl = (vifi == NO_VIF) ? 0 : uvifs[vifi].uv_threshold;
2408 resp->tr_rflags = errcode;
2410 /*
2411 * obtain # of packets out on interface
2412 */
2413 v_req.vifi = vifi;
2414 if (vifi != NO_VIF && ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
2415 resp->tr_vifout = htonl(v_req.ocount);
2416 else
2417 resp->tr_vifout = 0xffffffff;
2419 /*
2420 * fill in scoping & pruning information
2421 */
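/*
 * The scan below stops at the first entry with gt_mcastgrp >= group;
 * the equality test that follows decides whether a cache entry exists
 * for this group (the per-route group list appears to be kept sorted
 * by group address).
 */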
2422 if (rt)
2423 for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
2424 if (gt->gt_mcastgrp >= group)
2425 break;
2427 else
2428 gt = NULL;
2430 if (gt && gt->gt_mcastgrp == group) {
2431 struct stable *st;
2433 for (st = gt->gt_srctbl; st; st = st->st_next)
2434 if (qry->tr_src == st->st_origin)
2435 break;
2437 sg_req.src.s_addr = qry->tr_src;
2438 sg_req.grp.s_addr = group;
2439 if (st && st->st_ctime != 0 &&
2440 ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
2441 resp->tr_pktcnt = htonl(sg_req.pktcnt + st->st_savpkt);
2442 else
2443 resp->tr_pktcnt = htonl(st ? st->st_savpkt : 0xffffffff);
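/*
 * Classify why (or whether) traffic is forwarded on the query vif:
 * TR_SCOPED if the group is administratively scoped on that vif,
 * TR_PRUNED if we have sent a prune upstream, TR_OPRUNED if all
 * downstream neighbors on the vif have pruned, otherwise TR_NO_FWD
 * if the vif is simply not in the forwarding set.
 */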
2445 if (VIFM_ISSET(vifi, gt->gt_scope))
2446 resp->tr_rflags = TR_SCOPED;
2447 else if (gt->gt_prsent_timer)
2448 resp->tr_rflags = TR_PRUNED;
2449 else if (!VIFM_ISSET(vifi, gt->gt_grpmems))
2450 if (!NBRM_ISEMPTY(uvifs[vifi].uv_nbrmap) &&
2451 SUBS_ARE_PRUNED(rt->rt_subordinates,
2452 uvifs[vifi].uv_nbrmap, gt->gt_prunes))
2453 resp->tr_rflags = TR_OPRUNED;
2454 else
2455 resp->tr_rflags = TR_NO_FWD;
2456 } else {
2457 if ((vifi != NO_VIF && scoped_addr(vifi, group)) ||
2458 (rt && scoped_addr(rt->rt_parent, group)))
2459 resp->tr_rflags = TR_SCOPED;
2460 else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
2461 resp->tr_rflags = TR_NO_FWD;
2464 /*
2465 * if no rte exists, set NO_RTE error
2466 */
2467 if (rt == NULL) {
2468 src = dst; /* the dst address of resp. pkt */
2469 resp->tr_inaddr = 0;
2470 resp->tr_rflags = TR_NO_RTE;
2471 resp->tr_rmtaddr = 0;
2472 } else {
2473 /* get # of packets in on interface */
2474 v_req.vifi = rt->rt_parent;
2475 if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
2476 resp->tr_vifin = htonl(v_req.icount);
2477 else
2478 resp->tr_vifin = 0xffffffff;
2480 MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
2481 src = uvifs[rt->rt_parent].uv_lcl_addr;
2482 resp->tr_inaddr = src;
2483 resp->tr_rmtaddr = rt->rt_gateway;
2484 if (!VIFM_ISSET(vifi, rt->rt_children)) {
2485 IF_DEBUG(DEBUG_TRACE)
2486 log(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
2487 inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
2488 resp->tr_rflags = TR_WRONG_IF;
2490 if (rt->rt_metric >= UNREACHABLE) {
2491 resp->tr_rflags = TR_NO_RTE;
2492 /* Hack to send reply directly */
2493 rt = NULL;
2497 sendit:
2498 /*
2499 * if metric is 1 or no. of reports is 1, send response to requestor
2500 * else send to upstream router. If the upstream router can't handle
2501 * mtrace, set an error code and send to requestor anyway.
2502 */
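/*
 * Concretely: the response goes to the query's receiver address
 * (tr_raddr) when this report reaches the requested report count
 * (rcount + 1 == no), when there is no route back toward the source,
 * or when the source is directly connected (metric 1); otherwise the
 * request is forwarded to the upstream gateway.
 */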
2503 IF_DEBUG(DEBUG_TRACE)
2504 log(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);
2506 if ((rcount + 1 == no) || (rt == NULL) || (rt->rt_metric == 1)) {
2507 resptype = IGMP_MTRACE_RESP;
2508 dst = qry->tr_raddr;
2509 } else
2510 if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
2511 dst = qry->tr_raddr;
2512 resp->tr_rflags = TR_OLD_ROUTER;
2513 resptype = IGMP_MTRACE_RESP;
2514 } else {
2515 dst = rt->rt_gateway;
2516 resptype = IGMP_MTRACE;
2519 if (IN_MULTICAST(ntohl(dst))) {
2520 /*
2521 * Send the reply on a known multicast capable vif.
2522 * If we don't have one, we can't source any multicasts anyway.
2523 */
2524 if (phys_vif != -1) {
2525 IF_DEBUG(DEBUG_TRACE)
2526 log(LOG_DEBUG, 0, "Sending reply to %s from %s",
2527 inet_fmt(dst, s1), inet_fmt(uvifs[phys_vif].uv_lcl_addr, s2));
2528 k_set_ttl(qry->tr_rttl);
2529 send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
2530 resptype, no, group,
2531 datalen);
2532 k_set_ttl(1);
2533 } else
2534 log(LOG_INFO, 0, "No enabled phyints -- %s",
2535 "dropping traceroute reply");
2536 } else {
2537 IF_DEBUG(DEBUG_TRACE)
2538 log(LOG_DEBUG, 0, "Sending %s to %s from %s",
2539 resptype == IGMP_MTRACE_RESP ? "reply" : "request on",
2540 inet_fmt(dst, s1), inet_fmt(src, s2));
2542 send_igmp(src, dst,
2543 resptype, no, group,
2544 datalen);
2546 return;