/*	$NetBSD: output.c,v 1.23 2006/03/18 20:21:50 christos Exp $	*/

/*
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "defs.h"

#ifdef __NetBSD__
__RCSID("$NetBSD: output.c,v 1.23 2006/03/18 20:21:50 christos Exp $");
#elif defined(__FreeBSD__)
__RCSID("$FreeBSD$");
#else
__RCSID("Revision: 2.27 ");
#ident "Revision: 2.27 "
#endif


u_int update_seqno;


/* walk the tree of routes with this for output
 */
struct {
	struct sockaddr_in to;
	naddr	to_mask;
	naddr	to_net;
	naddr	to_std_mask;
	naddr	to_std_net;
	struct interface *ifp;		/* usually output interface */
	struct auth *a;
	char	metric;			/* adjust metrics by interface */
	int	npackets;
	int	gen_limit;
	u_int	state;
#define	    WS_ST_FLASH	    0x001	/* send only changed routes */
#define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
#define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
#define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
#define	    WS_ST_QUERY	    0x010	/* responding to a query */
#define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
#define	    WS_ST_DEFAULT   0x040	/* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
struct ws_buf v2buf;
union pkt_buf rip_v2_buf;
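

/* Initialize the two response buffers: one that both RIPv1 and RIPv2
 * listeners can parse, and one for RIPv2-only output.
 */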
void
bufinit(void)
{
	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	v12buf.buf = &ripv12_buf.rip;
	v12buf.base = &v12buf.buf->rip_nets[0];

	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	rip_v2_buf.rip.rip_vers = RIPv2;
	v2buf.buf = &rip_v2_buf.rip;
	v2buf.base = &v2buf.buf->rip_nets[0];
}


/* Send the contents of the global buffer via the non-multicast socket
 */
int					/* <0 on failure */
output(enum output_type type,
	struct sockaddr_in *dst,	/* send to here */
	struct interface *ifp,
	struct rip *buf,
	int size)			/* this many bytes */
{
	struct sockaddr_in osin;
	int flags;
	const char *msg;
	int res;
	naddr tgt_mcast;
	int soc;
	int serrno;

	osin = *dst;
	if (osin.sin_port == 0)
		osin.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
	if (osin.sin_len == 0)
		osin.sin_len = sizeof(osin);
#endif

	soc = rip_sock;
	flags = 0;
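
	/* Pick the socket, trace label, and sendto() flags that match the
	 * requested output type.
	 */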
	switch (type) {
	case OUT_QUERY:
		msg = "Answer Query";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		break;
	case OUT_UNICAST:
		msg = "Send";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		flags = MSG_DONTROUTE;
		break;
	case OUT_BROADCAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send";
		} else {
			msg = "Send bcast";
		}
		flags = MSG_DONTROUTE;
		break;
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send pt-to-pt";
		} else if (ifp->int_state & IS_DUP) {
			trace_act("abort multicast output via %s"
				  " with duplicate address",
				  ifp->int_name);
			return 0;
		} else {
			msg = "Send mcast";
			if (rip_sock_mcast != ifp) {
#ifdef MCAST_IFINDEX
				/* specify ifindex */
				tgt_mcast = htonl(ifp->int_index);
#else
#ifdef MCAST_PPP_BUG
				/* Do not specify the primary interface
				 * explicitly if we have the multicast
				 * point-to-point kernel bug, since the
				 * kernel will do the wrong thing if the
				 * local address of a point-to-point link
				 * is the same as the address of an ordinary
				 * interface.
				 */
				if (ifp->int_addr == myaddr) {
					tgt_mcast = 0;
				} else
#endif
				tgt_mcast = ifp->int_addr;
#endif
				if (0 > setsockopt(rip_sock,
						   IPPROTO_IP, IP_MULTICAST_IF,
						   &tgt_mcast,
						   sizeof(tgt_mcast))) {
					serrno = errno;
					LOGERR("setsockopt(rip_sock,"
					       "IP_MULTICAST_IF)");
					errno = serrno;
					ifp = 0;
					return -1;
				}
				rip_sock_mcast = ifp;
			}
			osin.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
		}
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
	default:
#ifdef DEBUG
		abort();
#endif
		return -1;
	}

	trace_rip(msg, "to", &osin, ifp, buf, size);

	res = sendto(soc, buf, size, flags,
		     (struct sockaddr *)&osin, sizeof(osin));
	if (res < 0
	    && (ifp == 0 || !(ifp->int_state & IS_BROKE))) {
		serrno = errno;
		msglog("%s sendto(%s%s%s.%d): %s", msg,
		       ifp != 0 ? ifp->int_name : "",
		       ifp != 0 ? ", " : "",
		       inet_ntoa(osin.sin_addr),
		       ntohs(osin.sin_port),
		       strerror(errno));
		errno = serrno;
	}

	return res;
}


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res;
	int i;

	if (ifp == 0)
		return 0;

	res = 0;
	ap = ifp->int_auth;
	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((u_long)ap->start > (u_long)clk.tv_sec)
			continue;

		if ((u_long)ap->end < (u_long)clk.tv_sec) {
			/* note best expired password as a fall-back */
			if (res == 0 || (u_long)ap->end > (u_long)res->end)
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == 0 || (u_long)res->end < (u_long)ap->end)
			res = ap;
	}

	return res;
}
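

/* Reset a packet buffer so it holds no routes and, when authentication
 * is configured, start the authentication entry that must lead the packet.
 */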
void
clr_ws_buf(struct ws_buf *wb,
	   struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));

	/* (start to) install authentication if appropriate
	 */
	if (ap == 0)
		return;

	na = (struct netauth*)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
		wb->n++;

	} else if (ap->type == RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_KEY_LEN;
		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}
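

/* Finish the keyed-MD5 trailer: append the authentication entry for which
 * clr_ws_buf() reserved room, record the packet length, and store the MD5
 * digest of the packet and the secret key.
 */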
void
end_md5_auth(struct ws_buf *wb,
	     struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;
	int len;

	na = (struct netauth*)wb->base;
	na2 = (struct netauth*)wb->n;
	len = (char *)na2-(char *)wb->buf;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = htons(1);
	na->au.a_md5.md5_pkt_len = htons(len);
	MD5Init(&md5_ctx);
	MD5Update(&md5_ctx, (u_char *)wb->buf, len + RIP_AUTH_MD5_HASH_XTRA);
	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_KEY_LEN);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
	/* Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
			  naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		if (ws.a != 0 && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb,ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
			   ((char *)wb->n - (char*)wb->buf)) < 0
		    && ws.ifp != 0)
			if_sick(ws.ifp);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
	int i;
	naddr mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;

	/* Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno
	    && (ws.state & WS_ST_FLASH))
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
	i = 0;

	/* If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL)
	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/* use the RIPv1-or-RIPv2 buffer */
		wb = &v12buf;

		/* Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
		if (v1_mask > mask
		    && mask + (mask & -mask) == 0) {
			ddst_h = v1_mask & -v1_mask;
			i = (v1_mask & ~mask)/ddst_h;

			if (i > ws.gen_limit) {
				/* Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
				if (TRACECONTENTS)
					trace_misc("sending %s-->%s as 1"
						   " instead of %d routes",
						   addrname(htonl(dst_h), mask,
							1),
						   naddr_ntoa(ws.to.sin_addr
							.s_addr),
						   i+1);
				i = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= i;
			}
		}
	}
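
	/* Emit one network entry per generated route, flushing the buffer
	 * with supply_write() whenever it fills.
	 */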
	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);
		/* If the route is from router-discovery or we are
		 * shutting down, admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
				   ? HOPCNT_INFINITY
				   : ag->ag_metric);
		HTONL(wb->n->n_metric);
		/* Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old `routed` to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0
			    && ((ws.state & WS_ST_QUERY)
				|| (ag->ag_nhop != ws.ifp->int_addr
				    && on_net(ag->ag_nhop,
					      ws.ifp->int_net,
					      ws.ifp->int_mask))))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (i-- != 0);
}


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
	    struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
	u_short ags;
	char metric, pref;
	naddr dst, nhop;
	struct rt_spare *rts;
	int i;

	/* Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF)
	    && RT->rt_ifp != 0
	    && (RT->rt_ifp->int_state & IS_PASSIVE)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	/* If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!supplier && !(ws.state & WS_ST_QUERY)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	dst = RT->rt_dst;

	/* do not collide with the fake default route */
	if (dst == RIP_DEFAULT
	    && (ws.state & WS_ST_DEFAULT))
		return 0;

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/* Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;

		} else {
			/* Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return 0;

			/* Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;
		}
		nhop = 0;

	} else {
		/* Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 * The final determination is made by supply_out().
		 */
		if (!(RT->rt_state & RS_IF)
		    && RT->rt_gate != myaddr
		    && RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;
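
	/* Classify the route (multi-homed host route, ordinary host route,
	 * or network route) and choose aggregation and suppression flags
	 * accordingly.
	 */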
	if (RT->rt_state & RS_MHOME) {
		/* retain host route of multi-homed servers */
		;

	} else if (RT_ISHOST(RT)) {
		/* We should always suppress (into existing network routes)
		 * the host routes for the local end of our point-to-point
		 * links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL)
		    || ridhosts
		    || on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		/* Aggregate stray host routes into network routes if allowed.
		 * We cannot aggregate host routes into small network routes
		 * without confusing RIPv1 listeners into thinking the
		 * network routes are host routes.
		 */
		if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
			ags |= AGS_AGGREGATE;

	} else {
		/* Always suppress network routes into other, existing
		 * network routes
		 */
		ags |= AGS_SUPPRESS;

		/* Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((ws.state & WS_ST_AG)
		    && ((RT->rt_state & RS_SUBNET)
			|| (ws.state & WS_ST_SUPER_AG)))
			ags |= AGS_AGGREGATE;
	}

	/* Do not send RIPv1 advertisements of subnets to other
	 * networks. If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET)
	    && !(ws.state & WS_ST_RIP2_ALL)
	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
		ags |= AGS_RIPV2 | AGS_AGGREGATE;

	/* Do not send a route back to where it came from, except in
	 * response to a query.  This is "split-horizon".  That means not
	 * advertising back to the same network and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here.  Let the split-horizon route
	 * suppress the fragmented routes and then itself be forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should know them as well as we do.
	 *
	 * Notice spare routes with the same metric that we are about to
	 * advertise, to split the horizon on redundant, inactive paths.
	 *
	 * Do not suppress advertisements of interface-related addresses on
	 * non-point-to-point interfaces.  This ensures that we have something
	 * to say every 30 seconds to help detect broken Ethernets or
	 * other interfaces where one packet every 30 seconds costs nothing.
	 */
	if (ws.ifp != 0
	    && !(ws.state & WS_ST_QUERY)
	    && (ws.state & WS_ST_TO_ON_NET)
	    && (!(RT->rt_state & RS_IF)
		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
			if (rts->rts_metric > metric
			    || rts->rts_ifp != ws.ifp)
				continue;

			/* If we do not mark the route with AGS_SPLIT_HZ here,
			 * it will be poisoned-reverse, or advertised back
			 * toward its source with an infinite metric.
			 * If we have recently advertised the route with a
			 * better metric than we now have, then we should
			 * poison-reverse the route before suppressing it for
			 * split-horizon.
			 *
			 * In almost all cases, if there is no spare for the
			 * route then it is either old and dead or a brand
			 * new route. If it is brand new, there is no need
			 * for poison-reverse. If it is old and dead, it
			 * is already poisoned.
			 */
			if (RT->rt_poison_time < now_expire
			    || RT->rt_poison_metric >= metric
			    || RT->rt_spares[1].rts_gate == 0) {
				ags |= AGS_SPLIT_HZ;
				ags &= ~AGS_SUPPRESS;
			}
			metric = HOPCNT_INFINITY;
			break;
		}
	}

	/* Keep track of the best metric with which the
	 * route has been advertised recently.
	 */
	if (RT->rt_poison_metric >= metric
	    || RT->rt_poison_time < now_expire) {
		RT->rt_poison_time = now.tv_sec;
		RT->rt_poison_metric = metric;
	}

	/* Adjust the outgoing metric by the cost of the link.
	 * Avoid aggregation when a route is counting to infinity.
	 */
	pref = RT->rt_poison_metric + ws.metric;
	metric += ws.metric;

	/* Do not advertise stable routes that will be ignored,
	 * unless we are answering a query.
	 * If the route recently was advertised with a metric that
	 * would have been less than infinity through this interface,
	 * we need to continue to advertise it in order to poison it.
	 */
	if (metric >= HOPCNT_INFINITY) {
		if (!(ws.state & WS_ST_QUERY)
		    && (pref >= HOPCNT_INFINITY
			|| RT->rt_poison_time < now_garbage))
			return 0;

		metric = HOPCNT_INFINITY;
	}

	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
	return 0;
#undef RT
}


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,		/* output interface */
       enum output_type type,
       int flash,			/* 1=flash update */
       int vers,			/* RIP version */
       int passwd_ok)			/* OK to include cleartext password */
{
	struct rt_entry *rt;
	int def_metric;
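
	/* Reset the per-destination walk state before walking the
	 * routing table.
	 */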
	ws.state = 0;
	ws.gen_limit = 1024;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != 0) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt)
			ifp = rt->rt_ifp;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;

	if ((ws.ifp = ifp) == 0) {
		ws.metric = 1;
	} else {
		/* Adjust the advertised metric by the outgoing interface
		 * metric.
		 */
		ws.metric = ifp->int_metric + 1 + ifp->int_adj_outmetric;
	}

	ripv12_buf.rip.rip_vers = vers;
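
	/* Decide how, or whether, each of the two buffers may be
	 * transmitted for this output type.
	 */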
	switch (type) {
	case OUT_MULTICAST:
		if (ifp != NULL && ifp->int_if_flags & IFF_MULTICAST)
			v2buf.type = OUT_MULTICAST;
		else
			v2buf.type = NO_OUT_MULTICAST;
		v12buf.type = OUT_BROADCAST;
		break;

	case OUT_QUERY:
		ws.state |= WS_ST_QUERY;
		/* fall through */
	case OUT_BROADCAST:
	case OUT_UNICAST:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
		break;			/* no output */
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
		if ((ws.state & WS_ST_QUERY)
		    || !(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == 0 || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST
			    && (ifp == 0
				|| !(ifp->int_state & IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}
	}

	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
	if (!passwd_ok && ws.a != 0 && ws.a->type == RIP_AUTH_PW)
		ws.a = 0;
	clr_ws_buf(&v12buf,ws.a);
	clr_ws_buf(&v2buf,ws.a);

	/* Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (supplier && ifp && (def_metric = ifp->int_d_metric) != 0) {
		if (0 == (rt = rtget(RIP_DEFAULT, 0))
		    || rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, 0, def_metric, def_metric,
				 0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/* If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL)
		    && (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

	(void)rn_walktree(rhead, walk_supply, 0);
	ag_flush(0,0,supply_out);

	/* Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
	 */
	if (v12buf.n != v12buf.base
	    && (v12buf.n > v12buf.base+1
		|| v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base
	    && (v2buf.n > v2buf.base+1
		|| v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/* If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0
	    && (ws.state & WS_ST_QUERY))
		supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;
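
	/* Pick a random delay and inhibit further dynamic (flash) updates
	 * until it has passed.
	 */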
	need_flash = 0;
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (rip_sock < 0)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
		  flash ? "dynamic update" : "all routes",
		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

	for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) {
		/* Skip interfaces not doing RIP.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state))
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			if (vers == RIPv2
			    && !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		supply(&dst, ifp, type, flash, vers, 1);
	}

	update_seqno++;			/* all routes are up to date */
}


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	struct rip buf;
	enum output_type type;

	if (rip_sock < 0)
		return;

	memset(&buf, 0, sizeof(buf));

	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
		/* Skip interfaces that have already been queried.
		 * Do not ask via interfaces through which we don't
		 * accept input.  Do not ask via interfaces that cannot
		 * send RIP packets.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state)
		    || ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;
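
		/* Build a request for the peer's entire routing table: a
		 * single entry with address family RIP_AF_UNSPEC and an
		 * infinite metric.
		 */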
		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
		buf.rip_cmd = RIPCMD_REQUEST;
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		/* Send a RIPv1 query only if allowed and if we will
		 * listen to RIPv1 routers.
		 */
		if ((ifp->int_state & IS_NO_RIPV1_OUT)
		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
			buf.rip_vers = RIPv2;
		} else {
			buf.rip_vers = RIPv1;
		}

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			/* Broadcast RIPv1 queries and RIPv2 queries
			 * when the hardware cannot multicast.
			 */
			if (buf.rip_vers == RIPv2
			    && (ifp->int_if_flags & IFF_MULTICAST)
			    && !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
			if_sick(ifp);
	}
}