/*
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sbin/routed/output.c,v 1.5.2.1 2000/08/14 17:00:03 sheldonh Exp $
 */

#include "defs.h"

#if !defined(sgi) && !defined(__NetBSD__)
static char sccsid[] __attribute__((unused)) = "@(#)output.c	8.1 (Berkeley) 6/5/93";
#elif defined(__NetBSD__)
__RCSID("$NetBSD$");
#endif

u_int update_seqno;


/* walk the tree of routes with this for output
 */
struct {
	struct sockaddr_in to;
	naddr	to_mask;
	naddr	to_net;
	naddr	to_std_mask;
	naddr	to_std_net;
	struct interface *ifp;		/* usually output interface */
	struct auth *a;
	char	metric;			/* adjust metrics by interface */
	int	npackets;
	int	gen_limit;
	u_int	state;
#define	    WS_ST_FLASH	    0x001	/* send only changed routes */
#define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
#define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
#define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
#define	    WS_ST_QUERY	    0x010	/* responding to a query */
#define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
#define	    WS_ST_DEFAULT   0x040	/* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
struct ws_buf v2buf;
union pkt_buf rip_v2_buf;
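

/* Set up the shared RIPv1/RIPv2 buffer and the RIPv2-only buffer. */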
void
bufinit(void)
{
	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	v12buf.buf = &ripv12_buf.rip;
	v12buf.base = &v12buf.buf->rip_nets[0];

	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	rip_v2_buf.rip.rip_vers = RIPv2;
	v2buf.buf = &rip_v2_buf.rip;
	v2buf.base = &v2buf.buf->rip_nets[0];
}


/* Send the contents of the global buffer via the non-multicast socket
 */
int					/* <0 on failure */
output(enum output_type type,
       struct sockaddr_in *dst,		/* send to here */
       struct interface *ifp,
       struct rip *buf,
       int size)			/* this many bytes */
{
	struct sockaddr_in in;
	int flags;
	const char *msg;
	int res;
	naddr tgt_mcast;
	int soc;
	int serrno;

	in = *dst;
	if (in.sin_port == 0)
		in.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
	if (in.sin_len == 0)
		in.sin_len = sizeof(in);
#endif

	soc = rip_sock;
	flags = 0;
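
	/* Pick the socket, trace label, and sendto() flags for this
	 * type of output.
	 */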
	switch (type) {
	case OUT_QUERY:
		msg = "Answer Query";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		break;
	case OUT_UNICAST:
		msg = "Send";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		flags = MSG_DONTROUTE;
		break;
	case OUT_BROADCAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send";
		} else {
			msg = "Send bcast";
		}
		flags = MSG_DONTROUTE;
		break;
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send pt-to-pt";
		} else if (ifp->int_state & IS_DUP) {
			trace_act("abort multicast output via %s"
				  " with duplicate address",
				  ifp->int_name);
			return 0;
		} else {
			msg = "Send mcast";
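			/* Point rip_sock's multicast output at this
			 * interface unless it is already set there
			 * (rip_sock_mcast remembers the current setting).
			 */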
			if (rip_sock_mcast != ifp) {
#ifdef MCAST_PPP_BUG
				/* Do not specify the primary interface
				 * explicitly if we have the multicast
				 * point-to-point kernel bug, since the
				 * kernel will do the wrong thing if the
				 * local address of a point-to-point link
				 * is the same as the address of an ordinary
				 * interface.
				 */
				if (ifp->int_addr == myaddr) {
					tgt_mcast = 0;
				} else
#endif
				tgt_mcast = ifp->int_addr;
				if (0 > setsockopt(rip_sock,
						   IPPROTO_IP, IP_MULTICAST_IF,
						   &tgt_mcast,
						   sizeof(tgt_mcast))) {
					serrno = errno;
					LOGERR("setsockopt(rip_sock,"
					       "IP_MULTICAST_IF)");
					errno = serrno;
					ifp = NULL;
					return -1;
				}
				rip_sock_mcast = ifp;
			}
			in.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
		}
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
	default:
#ifdef DEBUG
		abort();
#endif
		return -1;
	}

	trace_rip(msg, "to", &in, ifp, buf, size);

	res = sendto(soc, buf, size, flags,
		     (struct sockaddr *)&in, sizeof(in));
	if (res < 0
	    && (ifp == NULL || !(ifp->int_state & IS_BROKE))) {
		serrno = errno;
		msglog("%s sendto(%s%s%s.%d): %s", msg,
		       ifp != NULL ? ifp->int_name : "",
		       ifp != NULL ? ", " : "",
		       inet_ntoa(in.sin_addr),
		       ntohs(in.sin_port),
		       strerror(errno));
		errno = serrno;
	}

	return res;
}


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res;
	int i;


	if (ifp == NULL)
		return 0;

	res = NULL;
	ap = ifp->int_auth;
	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((u_long)ap->start > (u_long)clk.tv_sec)
			continue;

		if ((u_long)ap->end < (u_long)clk.tv_sec) {
			/* note best expired password as a fall-back */
			if (res == NULL || (u_long)ap->end > (u_long)res->end)
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == NULL || (u_long)res->end < (u_long)ap->end)
			res = ap;
	}
	return res;
}
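

/* Reset a walk buffer, and reserve its first slot for an
 * authentication entry when authentication is in use.
 */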
void
clr_ws_buf(struct ws_buf *wb,
	   struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));

	/* (start to) install authentication if appropriate
	 */
	if (ap == NULL)
		return;

	na = (struct netauth*)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
		wb->n++;

	} else if (ap->type == RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_LEN;
		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}
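

/* Finish the MD5 authentication trailer: record the packet length,
 * append the keyed MD5 digest of the packet, and count the trailer
 * as one more buffer entry.
 */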
void
end_md5_auth(struct ws_buf *wb,
	     struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;
	int len;


	na = (struct netauth*)wb->base;
	na2 = (struct netauth*)wb->n;
	len = (char *)na2-(char *)wb->buf;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = htons(1);
	na->au.a_md5.md5_pkt_len = htons(len);
	MD5Init(&md5_ctx);
	MD5Update(&md5_ctx, (u_char *)wb->buf, len);
	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_LEN);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
	/* Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
			  naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		if (ws.a != NULL && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb,ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
			   ((char *)wb->n - (char*)wb->buf)) < 0
		    && ws.ifp != NULL)
			if_sick(ws.ifp);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
	int i;
	naddr mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;


	/* Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno
	    && (ws.state & WS_ST_FLASH))
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
	i = 0;

	/* If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL)
	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/* use the RIPv1-or-RIPv2 buffer */
		wb = &v12buf;

		/* Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
		if (v1_mask > mask
		    && mask + (mask & -mask) == 0) {
			ddst_h = v1_mask & -v1_mask;
			i = (v1_mask & ~mask)/ddst_h;

			if (i > ws.gen_limit) {
				/* Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
				if (TRACECONTENTS)
					trace_misc("sending %s-->%s as 1"
						   " instead of %d routes",
						   addrname(htonl(dst_h), mask,
							    1),
						   naddr_ntoa(ws.to.sin_addr
							      .s_addr),
						   i+1);
				i = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= i;
			}
		}
	}

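	/* Emit the route, plus the i extra RIPv1 network routes generated
	 * above when a supernet had to be expanded.
	 */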
	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);
		/* If the route is from router-discovery or we are
		 * shutting down, admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
				   ? HOPCNT_INFINITY
				   : ag->ag_metric);
		wb->n->n_metric = htonl(wb->n->n_metric);
		/* Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old `routed` to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0
			    && ((ws.state & WS_ST_QUERY)
				|| (ag->ag_nhop != ws.ifp->int_addr
				    && on_net(ag->ag_nhop,
					      ws.ifp->int_net,
					      ws.ifp->int_mask))))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (i-- != 0);
}


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
	    struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
	u_short ags;
	char metric, pref;
	naddr dst, nhop;
	struct rt_spare *rts;
	int i;


	/* Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF)
	    && RT->rt_ifp != 0
	    && (RT->rt_ifp->int_state & IS_PASSIVE)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	/* If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!supplier && !(ws.state & WS_ST_QUERY)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	dst = RT->rt_dst;

	/* do not collide with the fake default route */
	if (dst == RIP_DEFAULT
	    && (ws.state & WS_ST_DEFAULT))
		return 0;

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/* Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;

		} else {
			/* Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return 0;

			/* Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;
		}
		nhop = 0;

	} else {
		/* Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 * The final determination is made by supply_out().
		 */
		if (!(RT->rt_state & RS_IF)
		    && RT->rt_gate != myaddr
		    && RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;

	if (RT->rt_state & RS_MHOME) {
		/* retain host route of multi-homed servers */
		;

	} else if (RT_ISHOST(RT)) {
		/* We should always suppress (into existing network routes)
		 * the host routes for the local end of our point-to-point
		 * links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL)
		    || ridhosts
		    || on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		/* Aggregate stray host routes into network routes if allowed.
		 * We cannot aggregate host routes into small network routes
		 * without confusing RIPv1 listeners into thinking the
		 * network routes are host routes.
		 */
		if ((ws.state & WS_ST_AG)
		    && !(ws.state & WS_ST_RIP2_ALL))
			ags |= AGS_AGGREGATE;

	} else {
		/* Always suppress network routes into other, existing
		 * network routes
		 */
		ags |= AGS_SUPPRESS;

		/* Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((ws.state & WS_ST_AG)
		    && ((RT->rt_state & RS_SUBNET)
			|| (ws.state & WS_ST_SUPER_AG)))
			ags |= AGS_AGGREGATE;
	}

	/* Do not send RIPv1 advertisements of subnets to other
	 * networks.  If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET)
	    && !(ws.state & WS_ST_RIP2_ALL)
	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
		ags |= AGS_RIPV2 | AGS_AGGREGATE;

	/* Do not send a route back to where it came from, except in
	 * response to a query.  This is "split-horizon".  That means not
	 * advertising back to the same network and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here.  Let the split-horizon route
	 * suppress the fragmented routes and then itself be forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should know them as well as we do.
	 *
	 * Notice spare routes with the same metric that we are about to
	 * advertise, to split the horizon on redundant, inactive paths.
	 */
	if (ws.ifp != 0
	    && !(ws.state & WS_ST_QUERY)
	    && (ws.state & WS_ST_TO_ON_NET)
	    && (!(RT->rt_state & RS_IF)
		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
			if (rts->rts_metric > metric
			    || rts->rts_ifp != ws.ifp)
				continue;

			/* If we do not mark the route with AGS_SPLIT_HZ here,
			 * it will be poisoned-reverse, or advertised back
			 * toward its source with an infinite metric.
			 * If we have recently advertised the route with a
			 * better metric than we now have, then we should
			 * poison-reverse the route before suppressing it for
			 * split-horizon.
			 *
			 * In almost all cases, if there is no spare for the
			 * route then it is either old and dead or a brand
			 * new route.  If it is brand new, there is no need
			 * for poison-reverse.  If it is old and dead, it
			 * is already poisoned.
			 */
			if (RT->rt_poison_time < now_expire
			    || RT->rt_poison_metric >= metric
			    || RT->rt_spares[1].rts_gate == 0) {
				ags |= AGS_SPLIT_HZ;
				ags &= ~AGS_SUPPRESS;
			}
			metric = HOPCNT_INFINITY;
			break;
		}
	}

	/* Keep track of the best metric with which the
	 * route has been advertised recently.
	 */
	if (RT->rt_poison_metric >= metric
	    || RT->rt_poison_time < now_expire) {
		RT->rt_poison_time = now.tv_sec;
		RT->rt_poison_metric = metric;
	}

	/* Adjust the outgoing metric by the cost of the link.
	 * Avoid aggregation when a route is counting to infinity.
	 */
	pref = RT->rt_poison_metric + ws.metric;
	metric += ws.metric;

	/* Do not advertise stable routes that will be ignored,
	 * unless we are answering a query.
	 * If the route recently was advertised with a metric that
	 * would have been less than infinity through this interface,
	 * we need to continue to advertise it in order to poison it.
	 */
	if (metric >= HOPCNT_INFINITY) {
		if (!(ws.state & WS_ST_QUERY)
		    && (pref >= HOPCNT_INFINITY
			|| RT->rt_poison_time < now_garbage))
			return 0;

		metric = HOPCNT_INFINITY;
	}

	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
	return 0;
#undef RT
}


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,		/* output interface */
       enum output_type type,
       int flash,			/* 1=flash update */
       int vers,			/* RIP version */
       int passwd_ok)			/* OK to include cleartext password */
{
	struct rt_entry *rt;
	int def_metric;


	ws.state = 0;
	ws.gen_limit = 1024;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != NULL) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt)
			ifp = rt->rt_ifp;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;

	if ((ws.ifp = ifp) == NULL) {
		ws.metric = 1;
	} else {
		/* Adjust the advertised metric by the outgoing interface
		 * metric.
		 */
		ws.metric = ifp->int_metric+1;
	}

	ripv12_buf.rip.rip_vers = vers;
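
	/* Decide how the RIPv2-only and shared buffers may be sent
	 * for this type of update.
	 */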
	switch (type) {
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_MULTICAST)
			v2buf.type = OUT_MULTICAST;
		else
			v2buf.type = NO_OUT_MULTICAST;
		v12buf.type = OUT_BROADCAST;
		break;

	case OUT_QUERY:
		ws.state |= WS_ST_QUERY;
		/* fall through */
	case OUT_BROADCAST:
	case OUT_UNICAST:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
		break;			/* no output */
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
		if ((ws.state & WS_ST_QUERY)
		    || !(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == NULL || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST
			    && (ifp == NULL
				|| !(ifp->int_state & IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}
	}

	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
	if (!passwd_ok && ws.a != NULL && ws.a->type == RIP_AUTH_PW)
		ws.a = NULL;
	clr_ws_buf(&v12buf,ws.a);
	clr_ws_buf(&v2buf,ws.a);

	/* Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (supplier && (def_metric = ifp->int_d_metric) != 0) {
		if (NULL == (rt = rtget(RIP_DEFAULT, 0))
		    || rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, 0, def_metric, def_metric,
				 0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/* If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL)
		    && (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

	rn_walktree(rhead, walk_supply, 0);
	ag_flush(0,0,supply_out);

	/* Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
	 */
	if (v12buf.n != v12buf.base
	    && (v12buf.n > v12buf.base+1
		|| v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base
	    && (v2buf.n > v2buf.base+1
		|| v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/* If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0
	    && (ws.state & WS_ST_QUERY))
		supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;


	need_flash = 0;
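	/* Pick a random interval during which dynamic updates are inhibited. */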
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (rip_sock < 0)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
		  flash ? "dynamic update" : "all routes",
		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

	for (ifp = ifnet; ifp != NULL; ifp = ifp->int_next) {
		/* Skip interfaces not doing RIP.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state))
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			if (vers == RIPv2
			    && !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		supply(&dst, ifp, type, flash, vers, 1);
	}

	update_seqno++;			/* all routes are up to date */
}


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	struct rip buf;
	enum output_type type;


	if (rip_sock < 0)
		return;

	memset(&buf, 0, sizeof(buf));

	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
		/* Skip interfaces that have already been queried.
		 * Do not ask via interfaces through which we don't
		 * accept input.  Do not ask via interfaces that cannot
		 * send RIP packets.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state)
		    || ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
		buf.rip_cmd = RIPCMD_REQUEST;
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		/* Send a RIPv1 query only if allowed and if we will
		 * listen to RIPv1 routers.
		 */
		if ((ifp->int_state & IS_NO_RIPV1_OUT)
		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
			buf.rip_vers = RIPv2;
		} else {
			buf.rip_vers = RIPv1;
		}

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			/* Broadcast RIPv1 queries and RIPv2 queries
			 * when the hardware cannot multicast.
			 */
			if (buf.rip_vers == RIPv2
			    && (ifp->int_if_flags & IFF_MULTICAST)
			    && !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
			if_sick(ifp);
	}
}