1 /*
2 * Copyright (c) 2004, 2005 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Jeffrey M. Hsu.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of The DragonFly Project nor the names of its
16 * contributors may be used to endorse or promote products derived
17 * from this software without specific, prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
34 * Copyright (c) 1980, 1986, 1991, 1993
35 * The Regents of the University of California. All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
61 * @(#)route.c 8.3 (Berkeley) 1/9/95
62 * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
65 #include "opt_inet.h"
66 #include "opt_mpls.h"
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/malloc.h>
71 #include <sys/mbuf.h>
72 #include <sys/socket.h>
73 #include <sys/domain.h>
74 #include <sys/kernel.h>
75 #include <sys/sysctl.h>
76 #include <sys/globaldata.h>
77 #include <sys/thread.h>
79 #include <net/if.h>
80 #include <net/route.h>
81 #include <net/netisr.h>
83 #include <netinet/in.h>
84 #include <net/ip_mroute/ip_mroute.h>
86 #include <sys/thread2.h>
87 #include <sys/msgport2.h>
88 #include <net/netmsg2.h>
89 #include <net/netisr2.h>
91 #ifdef MPLS
92 #include <netproto/mpls/mpls.h>
93 #endif
95 static struct rtstatistics rtstatistics_percpu[MAXCPU] __cachealign;
96 #define rtstat rtstatistics_percpu[mycpuid]
98 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
100 static void rt_maskedcopy (struct sockaddr *, struct sockaddr *,
101 struct sockaddr *);
102 static void rtable_init(void);
103 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
104 struct rtentry *, void *);
106 static void rtredirect_msghandler(netmsg_t msg);
107 static void rtrequest1_msghandler(netmsg_t msg);
108 static void rtsearch_msghandler(netmsg_t msg);
109 static void rtmask_add_msghandler(netmsg_t msg);
111 static int rt_setshims(struct rtentry *, struct sockaddr **);
113 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
115 #ifdef ROUTE_DEBUG
116 static int route_debug = 1;
117 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
118 &route_debug, 0, "");
119 #endif
121 u_long route_kmalloc_limit = 0;
122 TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
125 * Initialize the route table(s) for protocol domains and
126 * create a helper thread which will be responsible for updating
127 * route table entries on each cpu.
129 void
130 route_init(void)
132 int cpu;
134 if (route_kmalloc_limit)
135 kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);
137 for (cpu = 0; cpu < netisr_ncpus; ++cpu)
138 bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
139 rn_init(); /* initialize all zeroes, all ones, mask table */
140 rtable_init(); /* call dom_rtattach() on each cpu */
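/*
 * Attach the routing tables of all protocol domains on the current
 * netisr cpu by calling each domain's dom_rtattach(), then forward
 * the message to the next cpu in the chain.
 */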
143 static void
144 rtable_init_oncpu(netmsg_t msg)
146 struct domain *dom;
147 int cpu = mycpuid;
149 ASSERT_NETISR_NCPUS(cpu);
151 SLIST_FOREACH(dom, &domains, dom_next) {
152 if (dom->dom_rtattach) {
153 dom->dom_rtattach(
154 (void **)&rt_tables[cpu][dom->dom_family],
155 dom->dom_rtoffset);
158 netisr_forwardmsg(&msg->base, cpu + 1);
161 static void
162 rtable_init(void)
164 struct netmsg_base msg;
166 netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
167 netisr_domsg_global(&msg);
171 * Routing statistics.
173 static int
174 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
176 int cpu, error = 0;
178 for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
179 if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
180 sizeof(struct rtstatistics))))
181 break;
182 if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
183 sizeof(struct rtstatistics))))
184 break;
187 return (error);
189 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
190 0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
193 * Packet routing routines.
197 * Look up and fill in the "ro_rt" rtentry field in a route structure given
198 * an address in the "ro_dst" field. Always send a report on a miss and
199 * always clone routes.
201 void
202 rtalloc(struct route *ro)
204 rtalloc_ign(ro, 0UL);
208 * Look up and fill in the "ro_rt" rtentry field in a route structure given
209 * an address in the "ro_dst" field. Always send a report on a miss and
210 * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
211 * ignored.
213 void
214 rtalloc_ign(struct route *ro, u_long ignoreflags)
216 if (ro->ro_rt != NULL) {
217 if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
218 return;
219 rtfree(ro->ro_rt);
220 ro->ro_rt = NULL;
222 ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
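/*
 * Illustrative sketch of the typical calling pattern for the cached
 * route API above; 'ip_dst' simply stands for whatever destination
 * address the caller is routing towards:
 *
 *	struct route ro;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ro.ro_dst;
 *
 *	bzero(&ro, sizeof(ro));
 *	sin->sin_family = AF_INET;
 *	sin->sin_len = sizeof(*sin);
 *	sin->sin_addr = ip_dst;
 *	rtalloc_ign(&ro, RTF_PRCLONING);
 *	if (ro.ro_rt != NULL) {
 *		... use ro.ro_rt, e.g. ro.ro_rt->rt_ifp ...
 *		RTFREE(ro.ro_rt);
 *	}
 */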
226 * Look up the route that matches the given "dst" address.
228 * Route lookup can have the side-effect of creating and returning
229 * a cloned route instead when "dst" matches a cloning route and the
230 * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
232 * Any route returned has its reference count incremented.
234 struct rtentry *
235 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
237 struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
238 struct rtentry *rt;
240 ASSERT_NETISR_NCPUS(mycpuid);
242 if (rnh == NULL)
243 goto unreach;
246 * Look up route in the radix tree.
248 rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
249 if (rt == NULL)
250 goto unreach;
253 * Handle cloning routes.
255 if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
256 struct rtentry *clonedroute;
257 int error;
259 clonedroute = rt; /* copy in/copy out parameter */
260 error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
261 &clonedroute); /* clone the route */
262 if (error != 0) { /* cloning failed */
263 if (generate_report)
264 rt_dstmsg(RTM_MISS, dst, error);
265 rt->rt_refcnt++;
266 return (rt); /* return the uncloned route */
268 if (generate_report) {
269 if (clonedroute->rt_flags & RTF_XRESOLVE)
270 rt_dstmsg(RTM_RESOLVE, dst, 0);
271 else
272 rt_rtmsg(RTM_ADD, clonedroute,
273 clonedroute->rt_ifp, 0);
275 return (clonedroute); /* return cloned route */
279 * Increment the reference count of the matched route and return.
281 rt->rt_refcnt++;
282 return (rt);
284 unreach:
285 rtstat.rts_unreach++;
286 if (generate_report)
287 rt_dstmsg(RTM_MISS, dst, 0);
288 return (NULL);
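/*
 * Drop a reference on a route.  When the last reference is released
 * and the route is no longer RTF_UP, the rtentry itself is destroyed:
 * the radix key storage is freed and the references on the interface
 * address and the parent (cloning) route are released.  Must be called
 * from the netisr cpu that owns the route.
 */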
291 void
292 rtfree(struct rtentry *rt)
295 ASSERT_NETISR_NCPUS(rt->rt_cpuid);
296 KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
298 --rt->rt_refcnt;
299 if (rt->rt_refcnt == 0) {
300 struct radix_node_head *rnh =
301 rt_tables[mycpuid][rt_key(rt)->sa_family];
303 if (rnh->rnh_close)
304 rnh->rnh_close((struct radix_node *)rt, rnh);
305 if (!(rt->rt_flags & RTF_UP)) {
306 /* deallocate route */
307 if (rt->rt_ifa != NULL)
308 IFAFREE(rt->rt_ifa);
309 if (rt->rt_parent != NULL)
310 RTFREE(rt->rt_parent); /* recursive call! */
311 Free(rt_key(rt));
312 Free(rt);
317 static void
318 rtfree_async_dispatch(netmsg_t msg)
320 struct rtentry *rt = msg->lmsg.u.ms_resultp;
322 rtfree(rt);
323 netisr_replymsg(&msg->base, 0);
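/*
 * Free a route from an arbitrary context.  If we are already running
 * in the netisr that owns the route it is freed directly, otherwise a
 * message is dispatched so the free happens on the owning cpu.
 */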
326 void
327 rtfree_async(struct rtentry *rt)
329 struct netmsg_base *msg;
331 if (IN_NETISR_NCPUS(rt->rt_cpuid)) {
332 rtfree(rt);
333 return;
336 KASSERT(rt->rt_refcnt > 0,
337 ("rtfree_async: rt_refcnt %ld", rt->rt_refcnt));
339 msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
340 netmsg_init(msg, NULL, &netisr_afree_rport, 0, rtfree_async_dispatch);
341 msg->lmsg.u.ms_resultp = rt;
343 netisr_sendmsg(msg, rt->rt_cpuid);
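/*
 * Apply a routing redirect on the current cpu: verify that the new
 * gateway is directly reachable and is not one of our own addresses,
 * then either create a new RTF_DYNAMIC host route or modify the
 * gateway of the existing route.  Statistics are updated accordingly.
 */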
347 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
348 struct sockaddr *netmask, int flags, struct sockaddr *src)
350 struct rtentry *rt = NULL;
351 struct rt_addrinfo rtinfo;
352 struct ifaddr *ifa;
353 u_long *stat = NULL;
354 int error;
356 ASSERT_NETISR_NCPUS(mycpuid);
358 /* verify the gateway is directly reachable */
359 if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
360 error = ENETUNREACH;
361 goto out;
365 * If the redirect isn't from our current router for this destination,
366 * it's either old or wrong.
368 if (!(flags & RTF_DONE) && /* XXX JH */
369 (rt = rtpurelookup(dst)) != NULL &&
370 (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
371 error = EINVAL;
372 goto done;
376 * If it redirects us to ourselves, we have a routing loop,
377 * perhaps as a result of an interface going down recently.
379 if (ifa_ifwithaddr(gateway)) {
380 error = EHOSTUNREACH;
381 goto done;
385 * Create a new entry if the lookup failed or if we got back
386 * a wildcard entry for the default route. This is necessary
387 * for hosts which use routing redirects generated by smart
388 * gateways to dynamically build the routing tables.
390 if (rt == NULL)
391 goto create;
392 if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
393 rtfree(rt);
394 goto create;
397 /* Ignore redirects for directly connected hosts. */
398 if (!(rt->rt_flags & RTF_GATEWAY)) {
399 error = EHOSTUNREACH;
400 goto done;
403 if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
405 * Changing from a network route to a host route.
406 * Create a new host route rather than smashing the
407 * network route.
409 create:
410 flags |= RTF_GATEWAY | RTF_DYNAMIC;
411 bzero(&rtinfo, sizeof(struct rt_addrinfo));
412 rtinfo.rti_info[RTAX_DST] = dst;
413 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
414 rtinfo.rti_info[RTAX_NETMASK] = netmask;
415 rtinfo.rti_flags = flags;
416 rtinfo.rti_ifa = ifa;
417 rt = NULL; /* copy-in/copy-out parameter */
418 error = rtrequest1(RTM_ADD, &rtinfo, &rt);
419 if (rt != NULL)
420 flags = rt->rt_flags;
421 stat = &rtstat.rts_dynamic;
422 } else {
424 * Smash the current notion of the gateway to this destination.
425 * Should check about netmask!!!
427 rt->rt_flags |= RTF_MODIFIED;
428 flags |= RTF_MODIFIED;
430 /* We only need to report rtmsg on CPU0 */
431 rt_setgate(rt, rt_key(rt), gateway,
432 mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
433 error = 0;
434 stat = &rtstat.rts_newgateway;
437 done:
438 if (rt != NULL)
439 rtfree(rt);
440 out:
441 if (error != 0)
442 rtstat.rts_badredirect++;
443 else if (stat != NULL)
444 (*stat)++;
446 return error;
449 struct netmsg_rtredirect {
450 struct netmsg_base base;
451 struct sockaddr *dst;
452 struct sockaddr *gateway;
453 struct sockaddr *netmask;
454 int flags;
455 struct sockaddr *src;
459 * Force a routing table entry to the specified
460 * destination to go through the given gateway.
461 * Normally called as a result of a routing redirect
462 * message from the network layer.
464 void
465 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
466 struct sockaddr *netmask, int flags, struct sockaddr *src)
468 struct rt_addrinfo rtinfo;
469 int error;
470 struct netmsg_rtredirect msg;
472 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
473 0, rtredirect_msghandler);
474 msg.dst = dst;
475 msg.gateway = gateway;
476 msg.netmask = netmask;
477 msg.flags = flags;
478 msg.src = src;
479 error = netisr_domsg_global(&msg.base);
481 bzero(&rtinfo, sizeof(struct rt_addrinfo));
482 rtinfo.rti_info[RTAX_DST] = dst;
483 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
484 rtinfo.rti_info[RTAX_NETMASK] = netmask;
485 rtinfo.rti_info[RTAX_AUTHOR] = src;
486 rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
489 static void
490 rtredirect_msghandler(netmsg_t msg)
492 struct netmsg_rtredirect *rmsg = (void *)msg;
494 rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
495 rmsg->flags, rmsg->src);
496 netisr_forwardmsg(&msg->base, mycpuid + 1);
500 * Routing table ioctl interface.
503 rtioctl(u_long req, caddr_t data, struct ucred *cred)
505 #ifdef INET
506 /* Multicast goop, grrr... */
507 return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
508 #else
509 return ENXIO;
510 #endif
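/*
 * Select the interface address to be used for a route to 'dst' via
 * 'gateway'.  Interface routes are keyed on the destination (or the
 * local address), gateway routes on the gateway itself, falling back
 * to a routing table lookup when no direct match is found.
 */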
513 struct ifaddr *
514 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
516 struct ifaddr *ifa;
518 if (!(flags & RTF_GATEWAY)) {
520 * If we are adding a route to an interface,
521 * and the interface is a point-to-point link,
522 * we should search for the destination
523 * as our clue to the interface. Otherwise
524 * we can use the local address.
526 ifa = NULL;
527 if (flags & RTF_HOST) {
528 ifa = ifa_ifwithdstaddr(dst);
530 if (ifa == NULL)
531 ifa = ifa_ifwithaddr(gateway);
532 } else {
534 * If we are adding a route to a remote net
535 * or host, the gateway may still be on the
536 		 * other end of a point-to-point link.
538 ifa = ifa_ifwithdstaddr(gateway);
540 if (ifa == NULL)
541 ifa = ifa_ifwithnet(gateway);
542 if (ifa == NULL) {
543 struct rtentry *rt;
545 rt = rtpurelookup(gateway);
546 if (rt == NULL)
547 return (NULL);
548 rt->rt_refcnt--;
549 if ((ifa = rt->rt_ifa) == NULL)
550 return (NULL);
552 if (ifa->ifa_addr->sa_family != dst->sa_family) {
553 struct ifaddr *oldifa = ifa;
555 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
556 if (ifa == NULL)
557 ifa = oldifa;
559 return (ifa);
562 static int rt_fixdelete (struct radix_node *, void *);
563 static int rt_fixchange (struct radix_node *, void *);
565 struct rtfc_arg {
566 struct rtentry *rt0;
567 struct radix_node_head *rnh;
571 * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
574 rt_getifa(struct rt_addrinfo *rtinfo)
576 struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
577 struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
578 struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
579 int flags = rtinfo->rti_flags;
582 * ifp may be specified by sockaddr_dl
583 * when protocol address is ambiguous.
585 if (rtinfo->rti_ifp == NULL) {
586 struct sockaddr *ifpaddr;
588 ifpaddr = rtinfo->rti_info[RTAX_IFP];
589 if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
590 struct ifaddr *ifa;
592 ifa = ifa_ifwithnet(ifpaddr);
593 if (ifa != NULL)
594 rtinfo->rti_ifp = ifa->ifa_ifp;
598 if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
599 rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
600 if (rtinfo->rti_ifa == NULL) {
601 struct sockaddr *sa;
603 sa = ifaaddr != NULL ? ifaaddr :
604 (gateway != NULL ? gateway : dst);
605 if (sa != NULL && rtinfo->rti_ifp != NULL)
606 rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
607 else if (dst != NULL && gateway != NULL)
608 rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
609 else if (sa != NULL)
610 rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
612 if (rtinfo->rti_ifa == NULL)
613 return (ENETUNREACH);
615 if (rtinfo->rti_ifp == NULL)
616 rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
617 return (0);
621 * Do appropriate manipulations of a routing tree given
622 * all the bits of info needed
625 rtrequest(
626 int req,
627 struct sockaddr *dst,
628 struct sockaddr *gateway,
629 struct sockaddr *netmask,
630 int flags,
631 struct rtentry **ret_nrt)
633 struct rt_addrinfo rtinfo;
635 bzero(&rtinfo, sizeof(struct rt_addrinfo));
636 rtinfo.rti_info[RTAX_DST] = dst;
637 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
638 rtinfo.rti_info[RTAX_NETMASK] = netmask;
639 rtinfo.rti_flags = flags;
640 return rtrequest1(req, &rtinfo, ret_nrt);
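/*
 * As rtrequest(), but the request is replicated to the routing tables
 * of all netisr cpus via rtrequest1_global().
 */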
644 rtrequest_global(
645 int req,
646 struct sockaddr *dst,
647 struct sockaddr *gateway,
648 struct sockaddr *netmask,
649 int flags)
651 struct rt_addrinfo rtinfo;
653 bzero(&rtinfo, sizeof(struct rt_addrinfo));
654 rtinfo.rti_info[RTAX_DST] = dst;
655 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
656 rtinfo.rti_info[RTAX_NETMASK] = netmask;
657 rtinfo.rti_flags = flags;
658 return rtrequest1_global(req, &rtinfo, NULL, NULL, RTREQ_PRIO_NORM);
661 struct netmsg_rtq {
662 struct netmsg_base base;
663 int req;
664 struct rt_addrinfo *rtinfo;
665 rtrequest1_callback_func_t callback;
666 void *arg;
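/*
 * Issue a routing table request on every netisr cpu in sequence.  The
 * message handler below applies the request to the per-cpu table,
 * invokes the optional callback and forwards the message to the next
 * cpu; MSGF_PRIORITY is used when the caller asks for a high priority
 * request.
 */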
670 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
671 rtrequest1_callback_func_t callback, void *arg, boolean_t req_prio)
673 struct netmsg_rtq msg;
674 int flags = 0;
676 if (req_prio)
677 flags = MSGF_PRIORITY;
678 netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
679 rtrequest1_msghandler);
680 msg.base.lmsg.ms_error = -1;
681 msg.req = req;
682 msg.rtinfo = rtinfo;
683 msg.callback = callback;
684 msg.arg = arg;
685 return (netisr_domsg_global(&msg.base));
 689  * Handle a route table request on the current cpu. Since the route tables
 690  * are supposed to be identical on each cpu, an error occurring later in the
691 * message chain is considered system-fatal.
693 static void
694 rtrequest1_msghandler(netmsg_t msg)
696 struct netmsg_rtq *rmsg = (void *)msg;
697 struct rt_addrinfo rtinfo;
698 struct rtentry *rt = NULL;
699 int error;
702 * Copy the rtinfo. We need to make sure that the original
703 * rtinfo, which is setup by the caller, in the netmsg will
704 * _not_ be changed; else the next CPU on the netmsg forwarding
705 * path will see a different rtinfo than what this CPU has seen.
707 rtinfo = *rmsg->rtinfo;
709 error = rtrequest1(rmsg->req, &rtinfo, &rt);
710 if (rt)
711 --rt->rt_refcnt;
712 if (rmsg->callback)
713 rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);
 716 	 * RTM_DELETE's are propagated even if an error occurs, since a
717 * cloned route might be undergoing deletion and cloned routes
718 * are not necessarily replicated. An overall error is returned
719 * only if no cpus have the route in question.
721 if (rmsg->base.lmsg.ms_error < 0 || error == 0)
722 rmsg->base.lmsg.ms_error = error;
724 if (error && rmsg->req != RTM_DELETE) {
725 if (mycpuid != 0) {
726 panic("rtrequest1_msghandler: rtrequest table req %d, "
727 "failed on cpu%d, error %d\n",
728 rmsg->req, mycpuid, error);
730 netisr_replymsg(&rmsg->base, error);
731 } else {
732 netisr_forwardmsg_error(&rmsg->base, mycpuid + 1,
733 rmsg->base.lmsg.ms_error);
738 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
740 struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
741 struct rtentry *rt;
742 struct radix_node *rn;
743 struct radix_node_head *rnh;
744 struct ifaddr *ifa;
745 struct sockaddr *ndst;
746 boolean_t reportmsg;
747 int error = 0;
749 ASSERT_NETISR_NCPUS(mycpuid);
751 #define gotoerr(x) { error = x ; goto bad; }
753 #ifdef ROUTE_DEBUG
754 if (route_debug)
755 rt_addrinfo_print(req, rtinfo);
756 #endif
758 crit_enter();
760 * Find the correct routing tree to use for this Address Family
762 if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
763 gotoerr(EAFNOSUPPORT);
766 * If we are adding a host route then we don't want to put
767 * a netmask in the tree, nor do we want to clone it.
769 if (rtinfo->rti_flags & RTF_HOST) {
770 rtinfo->rti_info[RTAX_NETMASK] = NULL;
771 rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
774 switch (req) {
775 case RTM_DELETE:
776 /* Remove the item from the tree. */
777 rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
778 (char *)rtinfo->rti_info[RTAX_NETMASK],
779 rnh);
780 if (rn == NULL)
781 gotoerr(ESRCH);
782 KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
783 ("rnh_deladdr returned flags 0x%x", rn->rn_flags));
784 rt = (struct rtentry *)rn;
786 /* ref to prevent a deletion race */
787 ++rt->rt_refcnt;
789 /* Free any routes cloned from this one. */
790 if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
791 rt_mask(rt) != NULL) {
792 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
793 (char *)rt_mask(rt),
794 rt_fixdelete, rt);
797 if (rt->rt_gwroute != NULL) {
798 RTFREE(rt->rt_gwroute);
799 rt->rt_gwroute = NULL;
803 * NB: RTF_UP must be set during the search above,
804 * because we might delete the last ref, causing
805 * rt to get freed prematurely.
807 rt->rt_flags &= ~RTF_UP;
809 #ifdef ROUTE_DEBUG
810 if (route_debug)
811 rt_print(rtinfo, rt);
812 #endif
814 /* Give the protocol a chance to keep things in sync. */
815 if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
816 ifa->ifa_rtrequest(RTM_DELETE, rt);
819 * If the caller wants it, then it can have it,
820 * but it's up to it to free the rtentry as we won't be
821 * doing it.
823 KASSERT(rt->rt_refcnt >= 0,
824 ("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
825 if (ret_nrt != NULL) {
826 /* leave ref intact for return */
827 *ret_nrt = rt;
828 } else {
829 /* deref / attempt to destroy */
830 rtfree(rt);
832 break;
834 case RTM_RESOLVE:
835 if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
836 gotoerr(EINVAL);
838 KASSERT(rt->rt_cpuid == mycpuid,
839 ("rt resolve rt_cpuid %d, mycpuid %d",
840 rt->rt_cpuid, mycpuid));
842 ifa = rt->rt_ifa;
843 rtinfo->rti_flags =
844 rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
845 rtinfo->rti_flags |= RTF_WASCLONED;
846 rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
847 if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
848 rtinfo->rti_flags |= RTF_HOST;
849 rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
850 rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
851 rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
852 goto makeroute;
854 case RTM_ADD:
855 KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
856 rtinfo->rti_info[RTAX_GATEWAY] != NULL,
857 ("rtrequest: GATEWAY but no gateway"));
859 if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
860 gotoerr(error);
861 ifa = rtinfo->rti_ifa;
862 makeroute:
863 R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
864 if (rt == NULL) {
865 if (req == RTM_ADD) {
866 kprintf("rtrequest1: alloc rtentry failed on "
867 "cpu%d\n", mycpuid);
869 gotoerr(ENOBUFS);
871 bzero(rt, sizeof(struct rtentry));
872 rt->rt_flags = RTF_UP | rtinfo->rti_flags;
873 rt->rt_cpuid = mycpuid;
875 if (mycpuid != 0 && req == RTM_ADD) {
876 /* For RTM_ADD, we have already sent rtmsg on CPU0. */
877 reportmsg = RTL_DONTREPORT;
878 } else {
880 * For RTM_ADD, we only send rtmsg on CPU0.
881 * For RTM_RESOLVE, we always send rtmsg. XXX
883 reportmsg = RTL_REPORTMSG;
885 error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
886 reportmsg);
887 if (error != 0) {
888 Free(rt);
889 gotoerr(error);
892 ndst = rt_key(rt);
893 if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
894 rt_maskedcopy(dst, ndst,
895 rtinfo->rti_info[RTAX_NETMASK]);
896 else
897 bcopy(dst, ndst, dst->sa_len);
899 if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
900 rt_setshims(rt, rtinfo->rti_info);
903 * Note that we now have a reference to the ifa.
904 * This moved from below so that rnh->rnh_addaddr() can
905 * examine the ifa and ifa->ifa_ifp if it so desires.
907 IFAREF(ifa);
908 rt->rt_ifa = ifa;
909 rt->rt_ifp = ifa->ifa_ifp;
910 /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
912 rn = rnh->rnh_addaddr((char *)ndst,
913 (char *)rtinfo->rti_info[RTAX_NETMASK],
914 rnh, rt->rt_nodes);
915 if (rn == NULL) {
916 struct rtentry *oldrt;
919 * We already have one of these in the tree.
920 * We do a special hack: if the old route was
921 * cloned, then we blow it away and try
922 * re-inserting the new one.
924 oldrt = rtpurelookup(ndst);
925 if (oldrt != NULL) {
926 --oldrt->rt_refcnt;
927 if (oldrt->rt_flags & RTF_WASCLONED) {
928 rtrequest(RTM_DELETE, rt_key(oldrt),
929 oldrt->rt_gateway,
930 rt_mask(oldrt),
931 oldrt->rt_flags, NULL);
932 rn = rnh->rnh_addaddr((char *)ndst,
933 (char *)
934 rtinfo->rti_info[RTAX_NETMASK],
935 rnh, rt->rt_nodes);
939 /* NOTE: rt_ifa may have been changed */
940 ifa = rt->rt_ifa;
943 * If it still failed to go into the tree,
944 * then un-make it (this should be a function).
946 if (rn == NULL) {
947 if (rt->rt_gwroute != NULL)
948 rtfree(rt->rt_gwroute);
949 IFAFREE(ifa);
950 Free(rt_key(rt));
951 Free(rt);
952 gotoerr(EEXIST);
956 * If we got here from RESOLVE, then we are cloning
957 * so clone the rest, and note that we
958 * are a clone (and increment the parent's references)
960 if (req == RTM_RESOLVE) {
961 rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
962 rt->rt_rmx.rmx_pksent = 0; /* reset packet counter */
963 if ((*ret_nrt)->rt_flags &
964 (RTF_CLONING | RTF_PRCLONING)) {
965 rt->rt_parent = *ret_nrt;
966 (*ret_nrt)->rt_refcnt++;
971 * if this protocol has something to add to this then
972 * allow it to do that as well.
974 if (ifa->ifa_rtrequest != NULL)
975 ifa->ifa_rtrequest(req, rt);
978 * We repeat the same procedure from rt_setgate() here because
979 * it doesn't fire when we call it there because the node
980 * hasn't been added to the tree yet.
982 if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
983 rt_mask(rt) != NULL) {
984 struct rtfc_arg arg = { rt, rnh };
986 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
987 (char *)rt_mask(rt),
988 rt_fixchange, &arg);
991 #ifdef ROUTE_DEBUG
992 if (route_debug)
993 rt_print(rtinfo, rt);
994 #endif
996 * Return the resulting rtentry,
997 * increasing the number of references by one.
999 if (ret_nrt != NULL) {
1000 rt->rt_refcnt++;
1001 *ret_nrt = rt;
1003 break;
1004 default:
1005 error = EOPNOTSUPP;
1007 bad:
1008 #ifdef ROUTE_DEBUG
1009 if (route_debug) {
1010 if (error)
1011 kprintf("rti %p failed error %d\n", rtinfo, error);
1012 else
1013 kprintf("rti %p succeeded\n", rtinfo);
1015 #endif
1016 crit_exit();
1017 return (error);
1021 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1022 * (i.e., the routes related to it by the operation of cloning). This
1023 * routine is iterated over all potential former-child-routes by way of
1024 * rnh->rnh_walktree_from() above, and those that actually are children of
1025 * the late parent (passed in as VP here) are themselves deleted.
1027 static int
1028 rt_fixdelete(struct radix_node *rn, void *vp)
1030 struct rtentry *rt = (struct rtentry *)rn;
1031 struct rtentry *rt0 = vp;
1033 if (rt->rt_parent == rt0 &&
1034 !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1035 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1036 rt->rt_flags, NULL);
1038 return 0;
1042 * This routine is called from rt_setgate() to do the analogous thing for
1043 * adds and changes. There is the added complication in this case of a
1044 * middle insert; i.e., insertion of a new network route between an older
1045 * network route and (cloned) host routes. For this reason, a simple check
1046 * of rt->rt_parent is insufficient; each candidate route must be tested
1047 * against the (mask, value) of the new route (passed as before in vp)
1048 * to see if the new route matches it.
1050 * XXX - it may be possible to do fixdelete() for changes and reserve this
1051 * routine just for adds. I'm not sure why I thought it was necessary to do
1052 * changes this way.
1054 #ifdef DEBUG
1055 static int rtfcdebug = 0;
1056 #endif
1058 static int
1059 rt_fixchange(struct radix_node *rn, void *vp)
1061 struct rtentry *rt = (struct rtentry *)rn;
1062 struct rtfc_arg *ap = vp;
1063 struct rtentry *rt0 = ap->rt0;
1064 struct radix_node_head *rnh = ap->rnh;
1065 u_char *xk1, *xm1, *xk2, *xmp;
1066 int i, len, mlen;
1068 #ifdef DEBUG
1069 if (rtfcdebug)
1070 kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1071 #endif
1073 if (rt->rt_parent == NULL ||
1074 (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1075 #ifdef DEBUG
1076 if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1077 #endif
1078 return 0;
1081 if (rt->rt_parent == rt0) {
1082 #ifdef DEBUG
1083 if (rtfcdebug) kprintf("parent match\n");
1084 #endif
1085 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1086 rt->rt_flags, NULL);
1090 * There probably is a function somewhere which does this...
1091 * if not, there should be.
1093 len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1095 xk1 = (u_char *)rt_key(rt0);
1096 xm1 = (u_char *)rt_mask(rt0);
1097 xk2 = (u_char *)rt_key(rt);
1099 /* avoid applying a less specific route */
1100 xmp = (u_char *)rt_mask(rt->rt_parent);
1101 mlen = rt_key(rt->rt_parent)->sa_len;
1102 if (mlen > rt_key(rt0)->sa_len) {
1103 #ifdef DEBUG
1104 if (rtfcdebug)
1105 kprintf("rt_fixchange: inserting a less "
1106 "specific route\n");
1107 #endif
1108 return 0;
1110 for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1111 if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1112 #ifdef DEBUG
1113 if (rtfcdebug)
1114 kprintf("rt_fixchange: inserting a less "
1115 "specific route\n");
1116 #endif
1117 return 0;
1121 for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1122 if ((xk2[i] & xm1[i]) != xk1[i]) {
1123 #ifdef DEBUG
1124 if (rtfcdebug) kprintf("no match\n");
1125 #endif
1126 return 0;
1131 * OK, this node is a clone, and matches the node currently being
1132 * changed/added under the node's mask. So, get rid of it.
1134 #ifdef DEBUG
1135 if (rtfcdebug) kprintf("deleting\n");
1136 #endif
1137 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1138 rt->rt_flags, NULL);
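/*
 * Set the gateway of a route.  The destination and the gateway share a
 * single malloc'ed chunk; if the new gateway does not fit, a larger
 * chunk is allocated and the old one freed.  For RTF_GATEWAY routes the
 * cached gateway route (rt_gwroute) is re-resolved, refusing to let a
 * route use itself as its own gateway.
 */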
1142 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1143 boolean_t generate_report)
1145 char *space, *oldspace;
1146 int dlen = RT_ROUNDUP(dst->sa_len), glen = RT_ROUNDUP(gate->sa_len);
1147 struct rtentry *rt = rt0;
1148 struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1150 ASSERT_NETISR_NCPUS(mycpuid);
1153 * A host route with the destination equal to the gateway
1154 * will interfere with keeping LLINFO in the routing
1155 * table, so disallow it.
1157 if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1158 (RTF_HOST | RTF_GATEWAY)) &&
1159 dst->sa_len == gate->sa_len &&
1160 sa_equal(dst, gate)) {
1162 * The route might already exist if this is an RTM_CHANGE
1163 * or a routing redirect, so try to delete it.
1165 if (rt_key(rt0) != NULL)
1166 rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1167 rt_mask(rt0), rt0->rt_flags, NULL);
1168 return EADDRNOTAVAIL;
1172 * Both dst and gateway are stored in the same malloc'ed chunk
1173 * (If I ever get my hands on....)
1174 * if we need to malloc a new chunk, then keep the old one around
1175 * till we don't need it any more.
1177 if (rt->rt_gateway == NULL ||
1178 glen > RT_ROUNDUP(rt->rt_gateway->sa_len)) {
1179 oldspace = (char *)rt_key(rt);
1180 R_Malloc(space, char *, dlen + glen);
1181 if (space == NULL)
1182 return ENOBUFS;
1183 rt->rt_nodes->rn_key = space;
1184 } else {
1185 space = (char *)rt_key(rt); /* Just use the old space. */
1186 oldspace = NULL;
1189 /* Set the gateway value. */
1190 rt->rt_gateway = (struct sockaddr *)(space + dlen);
1191 bcopy(gate, rt->rt_gateway, glen);
1193 if (oldspace != NULL) {
1195 * If we allocated a new chunk, preserve the original dst.
1196 * This way, rt_setgate() really just sets the gate
1197 * and leaves the dst field alone.
1199 bcopy(dst, space, dlen);
1200 Free(oldspace);
1204 * If there is already a gwroute, it's now almost definitely wrong
1205 * so drop it.
1207 if (rt->rt_gwroute != NULL) {
1208 RTFREE(rt->rt_gwroute);
1209 rt->rt_gwroute = NULL;
1211 if (rt->rt_flags & RTF_GATEWAY) {
1213 * Cloning loop avoidance: In the presence of
1214 * protocol-cloning and bad configuration, it is
1215 * possible to get stuck in bottomless mutual recursion
1216 * (rtrequest rt_setgate rtlookup). We avoid this
1217 * by not allowing protocol-cloning to operate for
1218 * gateways (which is probably the correct choice
1219 * anyway), and avoid the resulting reference loops
1220 * by disallowing any route to run through itself as
1221 * a gateway. This is obviously mandatory when we
1222 * get rt->rt_output().
1224 * This breaks TTCP for hosts outside the gateway! XXX JH
1226 rt->rt_gwroute = _rtlookup(gate, generate_report,
1227 RTF_PRCLONING);
1228 if (rt->rt_gwroute == rt) {
1229 rt->rt_gwroute = NULL;
1230 --rt->rt_refcnt;
1231 return EDQUOT; /* failure */
1236 * This isn't going to do anything useful for host routes, so
1237 * don't bother. Also make sure we have a reasonable mask
1238 * (we don't yet have one during adds).
1240 if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1241 struct rtfc_arg arg = { rt, rnh };
1243 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1244 (char *)rt_mask(rt),
1245 rt_fixchange, &arg);
1248 return 0;
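/*
 * Copy 'src' to 'dst' under 'netmask': sa_len and sa_family are copied
 * verbatim, the remaining bytes are ANDed with the mask, and anything
 * past the mask length is zeroed.
 */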
1251 static void
1252 rt_maskedcopy(
1253 struct sockaddr *src,
1254 struct sockaddr *dst,
1255 struct sockaddr *netmask)
1257 u_char *cp1 = (u_char *)src;
1258 u_char *cp2 = (u_char *)dst;
1259 u_char *cp3 = (u_char *)netmask;
1260 u_char *cplim = cp2 + *cp3;
1261 u_char *cplim2 = cp2 + *cp1;
1263 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1264 cp3 += 2;
1265 if (cplim > cplim2)
1266 cplim = cplim2;
1267 while (cp2 < cplim)
1268 *cp2++ = *cp1++ & *cp3++;
1269 if (cp2 < cplim2)
1270 bzero(cp2, cplim2 - cp2);
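/*
 * Resolve the route actually used to reach 'dst'.  If rt0 is no longer
 * up a fresh lookup is done; for gateway routes the cached rt_gwroute
 * is followed (and re-resolved if it went down).  Routes marked
 * RTF_REJECT that have not yet expired are refused with EHOSTDOWN or
 * EHOSTUNREACH.
 */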
1274 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1276 struct rtentry *up_rt, *rt;
1278 ASSERT_NETISR_NCPUS(mycpuid);
1280 if (!(rt0->rt_flags & RTF_UP)) {
1281 up_rt = rtlookup(dst);
1282 if (up_rt == NULL)
1283 return (EHOSTUNREACH);
1284 up_rt->rt_refcnt--;
1285 } else
1286 up_rt = rt0;
1287 if (up_rt->rt_flags & RTF_GATEWAY) {
1288 if (up_rt->rt_gwroute == NULL) {
1289 up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1290 if (up_rt->rt_gwroute == NULL)
1291 return (EHOSTUNREACH);
1292 } else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1293 rtfree(up_rt->rt_gwroute);
1294 up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1295 if (up_rt->rt_gwroute == NULL)
1296 return (EHOSTUNREACH);
1298 rt = up_rt->rt_gwroute;
1299 } else
1300 rt = up_rt;
1301 if (rt->rt_flags & RTF_REJECT &&
1302 (rt->rt_rmx.rmx_expire == 0 || /* rt doesn't expire */
1303 time_uptime < rt->rt_rmx.rmx_expire)) /* rt not expired */
1304 return (rt->rt_flags & RTF_HOST ? EHOSTDOWN : EHOSTUNREACH);
1305 *drt = rt;
1306 return 0;
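/*
 * Copy up to three MPLS shim sockaddrs (RTAX_MPLS1..RTAX_MPLS3) from
 * the request into the rtentry, stopping at the first missing shim.
 */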
1309 static int
1310 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim){
1311 int i;
1313 for (i=0; i<3; i++) {
1314 struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1315 int shimlen;
1317 if (shim == NULL)
1318 break;
1320 shimlen = RT_ROUNDUP(shim->sa_len);
1321 R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1322 bcopy(shim, rt->rt_shim[i], shimlen);
1325 return 0;
1328 #ifdef ROUTE_DEBUG
1331 * Print out a route table entry
1333 void
1334 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1336 kprintf("rti %p cpu %d route %p flags %08lx: ",
1337 rtinfo, mycpuid, rn, rn->rt_flags);
1338 sockaddr_print(rt_key(rn));
1339 kprintf(" mask ");
1340 sockaddr_print(rt_mask(rn));
1341 kprintf(" gw ");
1342 sockaddr_print(rn->rt_gateway);
1343 kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1344 kprintf(" ifa %p\n", rn->rt_ifa);
1347 void
1348 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1350 int didit = 0;
1351 int i;
1353 #ifdef ROUTE_DEBUG
1354 if (cmd == RTM_DELETE && route_debug > 1)
1355 print_backtrace(-1);
1356 #endif
1358 switch(cmd) {
1359 case RTM_ADD:
1360 kprintf("ADD ");
1361 break;
1362 case RTM_RESOLVE:
1363 kprintf("RES ");
1364 break;
1365 case RTM_DELETE:
1366 kprintf("DEL ");
1367 break;
1368 default:
1369 kprintf("C%02d ", cmd);
1370 break;
1372 kprintf("rti %p cpu %d ", rti, mycpuid);
1373 for (i = 0; i < rti->rti_addrs; ++i) {
1374 if (rti->rti_info[i] == NULL)
1375 continue;
1376 if (didit)
1377 kprintf(" ,");
1378 switch(i) {
1379 case RTAX_DST:
1380 kprintf("(DST ");
1381 break;
1382 case RTAX_GATEWAY:
1383 kprintf("(GWY ");
1384 break;
1385 case RTAX_NETMASK:
1386 kprintf("(MSK ");
1387 break;
1388 case RTAX_GENMASK:
1389 kprintf("(GEN ");
1390 break;
1391 case RTAX_IFP:
1392 kprintf("(IFP ");
1393 break;
1394 case RTAX_IFA:
1395 kprintf("(IFA ");
1396 break;
1397 case RTAX_AUTHOR:
1398 kprintf("(AUT ");
1399 break;
1400 case RTAX_BRD:
1401 kprintf("(BRD ");
1402 break;
1403 default:
1404 kprintf("(?%02d ", i);
1405 break;
1407 sockaddr_print(rti->rti_info[i]);
1408 kprintf(")");
1409 didit = 1;
1411 kprintf("\n");
1414 void
1415 sockaddr_print(struct sockaddr *sa)
1417 struct sockaddr_in *sa4;
1418 struct sockaddr_in6 *sa6;
1419 int len;
1420 int i;
1422 if (sa == NULL) {
1423 kprintf("NULL");
1424 return;
1427 len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1429 switch(sa->sa_family) {
1430 case AF_INET:
1431 case AF_INET6:
1432 default:
1433 switch(sa->sa_family) {
1434 case AF_INET:
1435 sa4 = (struct sockaddr_in *)sa;
1436 kprintf("INET %d %d.%d.%d.%d",
1437 ntohs(sa4->sin_port),
1438 (ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1439 (ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1440 (ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1441 (ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1443 break;
1444 case AF_INET6:
1445 sa6 = (struct sockaddr_in6 *)sa;
1446 kprintf("INET6 %d %04x:%04x%04x:%04x:%04x:%04x:%04x:%04x",
1447 ntohs(sa6->sin6_port),
1448 sa6->sin6_addr.s6_addr16[0],
1449 sa6->sin6_addr.s6_addr16[1],
1450 sa6->sin6_addr.s6_addr16[2],
1451 sa6->sin6_addr.s6_addr16[3],
1452 sa6->sin6_addr.s6_addr16[4],
1453 sa6->sin6_addr.s6_addr16[5],
1454 sa6->sin6_addr.s6_addr16[6],
1455 sa6->sin6_addr.s6_addr16[7]
1457 break;
1458 default:
1459 kprintf("AF%d ", sa->sa_family);
1460 while (len > 0 && sa->sa_data[len-1] == 0)
1461 --len;
1463 for (i = 0; i < len; ++i) {
1464 if (i)
1465 kprintf(".");
1466 kprintf("%d", (unsigned char)sa->sa_data[i]);
1468 break;
1473 #endif
1476 * Set up a routing table entry, normally for an interface.
1479 rtinit(struct ifaddr *ifa, int cmd, int flags)
1481 struct sockaddr *dst, *deldst, *netmask;
1482 struct mbuf *m = NULL;
1483 struct radix_node_head *rnh;
1484 struct radix_node *rn;
1485 struct rt_addrinfo rtinfo;
1486 int error;
1488 ASSERT_NETISR0;
1490 if (flags & RTF_HOST) {
1491 dst = ifa->ifa_dstaddr;
1492 netmask = NULL;
1493 } else {
1494 dst = ifa->ifa_addr;
1495 netmask = ifa->ifa_netmask;
1498 * If it's a delete, check that if it exists, it's on the correct
1499 * interface or we might scrub a route to another ifa which would
1500 * be confusing at best and possibly worse.
1502 if (cmd == RTM_DELETE) {
1504 * It's a delete, so it should already exist..
1505 * If it's a net, mask off the host bits
1506 * (Assuming we have a mask)
1508 if (netmask != NULL) {
1509 m = m_get(M_NOWAIT, MT_SONAME);
1510 if (m == NULL)
1511 return (ENOBUFS);
1512 mbuftrackid(m, 34);
1513 deldst = mtod(m, struct sockaddr *);
1514 rt_maskedcopy(dst, deldst, netmask);
1515 dst = deldst;
1518 * Look up an rtentry that is in the routing tree and
1519 * contains the correct info.
1521 if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1522 (rn = rnh->rnh_lookup((char *)dst,
1523 (char *)netmask, rnh)) == NULL ||
1524 ((struct rtentry *)rn)->rt_ifa != ifa ||
1525 !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1526 if (m != NULL)
1527 m_free(m);
1528 return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1530 /* XXX */
1531 #if 0
1532 else {
1534 * One would think that as we are deleting, and we know
1535 * it doesn't exist, we could just return at this point
1536 * with an "ELSE" clause, but apparently not..
1538 return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1540 #endif
1543 * Do the actual request
1545 bzero(&rtinfo, sizeof(struct rt_addrinfo));
1546 rtinfo.rti_info[RTAX_DST] = dst;
1547 rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1548 rtinfo.rti_info[RTAX_NETMASK] = netmask;
1549 rtinfo.rti_flags = flags | ifa->ifa_flags;
1550 rtinfo.rti_ifa = ifa;
1551 error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa,
1552 RTREQ_PRIO_HIGH);
1553 if (m != NULL)
1554 m_free(m);
1555 return (error);
1558 static void
1559 rtinit_rtrequest_callback(int cmd, int error,
1560 struct rt_addrinfo *rtinfo, struct rtentry *rt,
1561 void *arg)
1563 struct ifaddr *ifa = arg;
1565 if (error == 0 && rt) {
1566 if (mycpuid == 0) {
1567 ++rt->rt_refcnt;
1568 rt_newaddrmsg(cmd, ifa, error, rt);
1569 --rt->rt_refcnt;
1571 if (cmd == RTM_DELETE) {
1572 if (rt->rt_refcnt == 0) {
1573 ++rt->rt_refcnt;
1574 rtfree(rt);
1580 struct netmsg_rts {
1581 struct netmsg_base base;
1582 int req;
1583 struct rt_addrinfo *rtinfo;
1584 rtsearch_callback_func_t callback;
1585 void *arg;
1586 boolean_t exact_match;
1587 int found_cnt;
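/*
 * Search every cpu's routing table for the route described by 'rtinfo'
 * and invoke 'callback' for each match found.  With 'exact_match' set,
 * a host search must return a host route (and a network search a
 * network route) for the match to count.
 */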
1591 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1592 rtsearch_callback_func_t callback, void *arg, boolean_t exact_match,
1593 boolean_t req_prio)
1595 struct netmsg_rts msg;
1596 int flags = 0;
1598 if (req_prio)
1599 flags = MSGF_PRIORITY;
1600 netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
1601 rtsearch_msghandler);
1602 msg.req = req;
1603 msg.rtinfo = rtinfo;
1604 msg.callback = callback;
1605 msg.arg = arg;
1606 msg.exact_match = exact_match;
1607 msg.found_cnt = 0;
1608 return (netisr_domsg_global(&msg.base));
1611 static void
1612 rtsearch_msghandler(netmsg_t msg)
1614 struct netmsg_rts *rmsg = (void *)msg;
1615 struct rt_addrinfo rtinfo;
1616 struct radix_node_head *rnh;
1617 struct rtentry *rt;
1618 int error;
1620 ASSERT_NETISR_NCPUS(mycpuid);
1623 * Copy the rtinfo. We need to make sure that the original
1624 * rtinfo, which is setup by the caller, in the netmsg will
1625 * _not_ be changed; else the next CPU on the netmsg forwarding
1626 * path will see a different rtinfo than what this CPU has seen.
1628 rtinfo = *rmsg->rtinfo;
1631 * Find the correct routing tree to use for this Address Family
1633 if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1634 if (mycpuid != 0)
1635 panic("partially initialized routing tables");
1636 netisr_replymsg(&rmsg->base, EAFNOSUPPORT);
1637 return;
1641 * Correct rtinfo for the host route searching.
1643 if (rtinfo.rti_flags & RTF_HOST) {
1644 rtinfo.rti_netmask = NULL;
1645 rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1648 rt = (struct rtentry *)
1649 rnh->rnh_lookup((char *)rtinfo.rti_dst,
1650 (char *)rtinfo.rti_netmask, rnh);
1653 * If we are asked to do the "exact match", we need to make sure
1654 * that host route searching got a host route while a network
1655 * route searching got a network route.
1657 if (rt != NULL && rmsg->exact_match &&
1658 ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1659 rt = NULL;
1661 if (rt == NULL) {
1663 		 * No matching routes have been found; don't count this
1664 * as a critical error (here, we set 'error' to 0), just
1665 * keep moving on, since at least prcloned routes are not
1666 * duplicated onto each CPU.
1668 error = 0;
1669 } else {
1670 rmsg->found_cnt++;
1672 rt->rt_refcnt++;
1673 error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
1674 rmsg->found_cnt);
1675 rt->rt_refcnt--;
1677 if (error == EJUSTRETURN) {
1678 netisr_replymsg(&rmsg->base, 0);
1679 return;
1683 if (error) {
1684 KKASSERT(rmsg->found_cnt > 0);
1687 		 * In the following cases, an unrecoverable error has
1688 		 * not occurred:
1689 * o Request is RTM_GET
1690 * o The first time that we find the route, but the
1691 * modification fails.
1693 if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
1694 panic("rtsearch_msghandler: unrecoverable error "
1695 "cpu %d", mycpuid);
1697 netisr_replymsg(&rmsg->base, error);
1698 } else {
1699 if (rmsg->found_cnt == 0) {
1700 /* The requested route has not been seen ... */
1701 error = ESRCH;
1703 netisr_forwardmsg_error(&rmsg->base, mycpuid + 1, error);
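/*
 * Enter a netmask into the mask tree of every netisr cpu, returning
 * ENOBUFS if it cannot be added on some cpu.
 */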
1708 rtmask_add_global(struct sockaddr *mask, boolean_t req_prio)
1710 struct netmsg_base msg;
1711 int flags = 0;
1713 if (req_prio)
1714 flags = MSGF_PRIORITY;
1715 netmsg_init(&msg, NULL, &curthread->td_msgport, flags,
1716 rtmask_add_msghandler);
1717 msg.lmsg.u.ms_resultp = mask;
1719 return (netisr_domsg_global(&msg));
1722 struct sockaddr *
1723 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1725 struct radix_node *n;
1727 #define clen(s) (*(u_char *)(s))
1728 n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
1729 if (n != NULL &&
1730 mask->sa_len >= clen(n->rn_key) &&
1731 bcmp((char *)mask + 1,
1732 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1733 return (struct sockaddr *)n->rn_key;
1734 } else {
1735 return NULL;
1737 #undef clen
1740 static void
1741 rtmask_add_msghandler(netmsg_t msg)
1743 struct sockaddr *mask = msg->lmsg.u.ms_resultp;
1745 ASSERT_NETISR_NCPUS(mycpuid);
1747 if (rtmask_lookup(mask) == NULL) {
1748 netisr_replymsg(&msg->base, ENOBUFS);
1749 return;
1751 netisr_forwardmsg(&msg->base, mycpuid + 1);
1754 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1755 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1757 struct rtchange_arg {
1758 struct ifaddr *old_ifa;
1759 struct ifaddr *new_ifa;
1760 struct rtentry *rt;
1761 int changed;
1764 static void
1765 rtchange_ifa(struct rtentry *rt, struct rtchange_arg *ap)
1767 if (rt->rt_ifa->ifa_rtrequest != NULL)
1768 rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt);
1769 IFAFREE(rt->rt_ifa);
1771 IFAREF(ap->new_ifa);
1772 rt->rt_ifa = ap->new_ifa;
1773 rt->rt_ifp = ap->new_ifa->ifa_ifp;
1774 if (rt->rt_ifa->ifa_rtrequest != NULL)
1775 rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);
1777 ap->changed = 1;
1780 static int
1781 rtchange_callback(struct radix_node *rn, void *xap)
1783 struct rtchange_arg *ap = xap;
1784 struct rtentry *rt = (struct rtentry *)rn;
1786 if (rt->rt_ifa == ap->old_ifa) {
1787 if (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
1789 			 * We could be sawing off the branch we are
1790 			 * still sitting on, if the ifa_rtrequest
1791 			 * DEL/ADD were called directly from here.
1793 ap->rt = rt;
1794 return EJUSTRETURN;
1796 rtchange_ifa(rt, ap);
1798 return 0;
1801 struct netmsg_rtchange {
1802 struct netmsg_base base;
1803 struct ifaddr *old_ifa;
1804 struct ifaddr *new_ifa;
1805 int changed;
1808 static void
1809 rtchange_dispatch(netmsg_t msg)
1811 struct netmsg_rtchange *rmsg = (void *)msg;
1812 struct radix_node_head *rnh;
1813 struct rtchange_arg arg;
1814 int cpu;
1816 cpu = mycpuid;
1817 ASSERT_NETISR_NCPUS(cpu);
1819 memset(&arg, 0, sizeof(arg));
1820 arg.old_ifa = rmsg->old_ifa;
1821 arg.new_ifa = rmsg->new_ifa;
1823 rnh = rt_tables[cpu][AF_INET];
1824 for (;;) {
1825 int error;
1827 KKASSERT(arg.rt == NULL);
1828 error = rnh->rnh_walktree(rnh, rtchange_callback, &arg);
1829 if (arg.rt != NULL) {
1830 struct rtentry *rt;
1832 rt = arg.rt;
1833 arg.rt = NULL;
1834 rtchange_ifa(rt, &arg);
1835 } else {
1836 break;
1839 if (arg.changed)
1840 rmsg->changed = 1;
1842 netisr_forwardmsg(&rmsg->base, cpu + 1);
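/*
 * Walk the AF_INET routing table on every cpu and move all routes that
 * reference 'old_ifa' over to 'new_ifa', transferring the IFA_ROUTE
 * flag when anything actually changed; returns ENOENT otherwise.
 */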
1846 rtchange(struct ifaddr *old_ifa, struct ifaddr *new_ifa)
1848 struct netmsg_rtchange msg;
1851 	 * XXX individual requests are not independently chained,
1852 * which means that the per-cpu route tables will not be
1853 * consistent in the middle of the operation. If routes
1854 * related to the interface are manipulated while we are
1855 	 * doing this the inconsistency could trigger a panic.
1857 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
1858 rtchange_dispatch);
1859 msg.old_ifa = old_ifa;
1860 msg.new_ifa = new_ifa;
1861 msg.changed = 0;
1862 netisr_domsg_global(&msg.base);
1864 if (msg.changed) {
1865 old_ifa->ifa_flags &= ~IFA_ROUTE;
1866 new_ifa->ifa_flags |= IFA_ROUTE;
1867 return 0;
1868 } else {
1869 return ENOENT;