1 /*
2 * Copyright (c) 2004, 2005 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Jeffrey M. Hsu.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of The DragonFly Project nor the names of its
16 * contributors may be used to endorse or promote products derived
17 * from this software without specific, prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
34 * Copyright (c) 1980, 1986, 1991, 1993
35 * The Regents of the University of California. All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
61 * @(#)route.c 8.3 (Berkeley) 1/9/95
62 * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
65 #include "opt_inet.h"
66 #include "opt_mpls.h"
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/malloc.h>
71 #include <sys/mbuf.h>
72 #include <sys/socket.h>
73 #include <sys/domain.h>
74 #include <sys/kernel.h>
75 #include <sys/sysctl.h>
76 #include <sys/globaldata.h>
77 #include <sys/thread.h>
79 #include <net/if.h>
80 #include <net/route.h>
81 #include <net/netisr.h>
83 #include <netinet/in.h>
84 #include <net/ip_mroute/ip_mroute.h>
86 #include <sys/thread2.h>
87 #include <sys/msgport2.h>
88 #include <net/netmsg2.h>
89 #include <net/netisr2.h>
91 #ifdef MPLS
92 #include <netproto/mpls/mpls.h>
93 #endif
95 static struct rtstatistics rtstatistics_percpu[MAXCPU] __cachealign;
96 #define rtstat rtstatistics_percpu[mycpuid]
98 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
100 static void rt_maskedcopy (struct sockaddr *, struct sockaddr *,
101 struct sockaddr *);
102 static void rtable_init(void);
103 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
104 struct rtentry *, void *);
106 static void rtredirect_msghandler(netmsg_t msg);
107 static void rtrequest1_msghandler(netmsg_t msg);
108 static void rtsearch_msghandler(netmsg_t msg);
109 static void rtmask_add_msghandler(netmsg_t msg);
111 static int rt_setshims(struct rtentry *, struct sockaddr **);
113 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
115 #ifdef ROUTE_DEBUG
116 static int route_debug = 1;
117 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
118 &route_debug, 0, "");
119 #endif
121 int route_assert_owner_access = 1;
122 SYSCTL_INT(_net_route, OID_AUTO, assert_owner_access, CTLFLAG_RW,
123 &route_assert_owner_access, 0, "");
125 u_long route_kmalloc_limit = 0;
126 TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
129 * Initialize the route table(s) for protocol domains and
130 * create a helper thread which will be responsible for updating
131 * route table entries on each cpu.
133 void
134 route_init(void)
136 int cpu;
138 for (cpu = 0; cpu < ncpus; ++cpu)
139 bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
140 rn_init(); /* initialize all zeroes, all ones, mask table */
141 rtable_init(); /* call dom_rtattach() on each cpu */
143 if (route_kmalloc_limit)
144 kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);
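/*
 * Tuning sketch (illustrative, not part of this file): because
 * route_kmalloc_limit is registered as a TUNABLE above, the limit
 * applied here can be raised at boot time from /boot/loader.conf,
 * e.g. with an illustrative value in bytes:
 *
 *	net.route.kmalloc_limit="33554432"
 */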
147 static void
148 rtable_init_oncpu(netmsg_t msg)
150 struct domain *dom;
151 int nextcpu = mycpuid + 1;
153 SLIST_FOREACH(dom, &domains, dom_next) {
154 if (dom->dom_rtattach) {
155 dom->dom_rtattach(
156 (void **)&rt_tables[mycpuid][dom->dom_family],
157 dom->dom_rtoffset);
160 if (nextcpu < ncpus)
161 lwkt_forwardmsg(netisr_cpuport(nextcpu), &msg->lmsg);
162 else
163 lwkt_replymsg(&msg->lmsg, 0);
166 static void
167 rtable_init(void)
169 struct netmsg_base msg;
171 netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
172 rt_domsg_global(&msg);
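/*
 * Pattern sketch (illustrative; example_do_percpu_work() is a
 * hypothetical placeholder): most "replicate on every cpu" work in
 * this file uses the same chained-netmsg idiom as rtable_init_oncpu()
 * above: do the work on the current netisr cpu, then either forward
 * the same message to the next cpu or reply once the last cpu is done.
 *
 *	static void
 *	example_oncpu(netmsg_t msg)
 *	{
 *		int nextcpu = mycpuid + 1;
 *
 *		example_do_percpu_work();
 *		if (nextcpu < ncpus)
 *			lwkt_forwardmsg(netisr_cpuport(nextcpu), &msg->lmsg);
 *		else
 *			lwkt_replymsg(&msg->lmsg, 0);
 *	}
 */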
176 * Routing statistics.
178 static int
179 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
181 int cpu, error = 0;
183 for (cpu = 0; cpu < ncpus; ++cpu) {
184 if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
185 sizeof(struct rtstatistics))))
186 break;
187 if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
188 sizeof(struct rtstatistics))))
189 break;
192 return (error);
194 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
195 0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
198 * Packet routing routines.
202 * Look up and fill in the "ro_rt" rtentry field in a route structure given
203 * an address in the "ro_dst" field. Always send a report on a miss and
204 * always clone routes.
206 void
207 rtalloc(struct route *ro)
209 rtalloc_ign(ro, 0UL);
213 * Look up and fill in the "ro_rt" rtentry field in a route structure given
214 * an address in the "ro_dst" field. Always send a report on a miss and
215 * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
216 * ignored.
218 void
219 rtalloc_ign(struct route *ro, u_long ignoreflags)
221 if (ro->ro_rt != NULL) {
222 if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
223 return;
224 rtfree(ro->ro_rt);
225 ro->ro_rt = NULL;
227 ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
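/*
 * Usage sketch (illustrative only, not taken from this file): a caller
 * that wants a route to a destination typically embeds a struct route,
 * fills in ro_dst, and lets rtalloc()/rtalloc_ign() populate ro_rt.
 * "dst_addr" below is a placeholder struct in_addr.
 *
 *	struct route ro;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ro.ro_dst;
 *
 *	bzero(&ro, sizeof(ro));
 *	sin->sin_len = sizeof(*sin);
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr = dst_addr;
 *	rtalloc_ign(&ro, RTF_PRCLONING);	(skip protocol cloning)
 *	if (ro.ro_rt == NULL)
 *		return (EHOSTUNREACH);
 *	... use ro.ro_rt ...
 *	RTFREE(ro.ro_rt);			(drop the reference)
 */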
231 * Look up the route that matches the given "dst" address.
233 * Route lookup can have the side-effect of creating and returning
234 * a cloned route instead when "dst" matches a cloning route and the
235 * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
237 * Any route returned has its reference count incremented.
239 struct rtentry *
240 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
242 struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
243 struct rtentry *rt;
245 if (rnh == NULL)
246 goto unreach;
249 * Look up route in the radix tree.
251 rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
252 if (rt == NULL)
253 goto unreach;
256 * Handle cloning routes.
258 if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
259 struct rtentry *clonedroute;
260 int error;
262 clonedroute = rt; /* copy in/copy out parameter */
263 error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
264 &clonedroute); /* clone the route */
265 if (error != 0) { /* cloning failed */
266 if (generate_report)
267 rt_dstmsg(RTM_MISS, dst, error);
268 rt->rt_refcnt++;
269 return (rt); /* return the uncloned route */
271 if (generate_report) {
272 if (clonedroute->rt_flags & RTF_XRESOLVE)
273 rt_dstmsg(RTM_RESOLVE, dst, 0);
274 else
275 rt_rtmsg(RTM_ADD, clonedroute,
276 clonedroute->rt_ifp, 0);
278 return (clonedroute); /* return cloned route */
282 * Increment the reference count of the matched route and return.
284 rt->rt_refcnt++;
285 return (rt);
287 unreach:
288 rtstat.rts_unreach++;
289 if (generate_report)
290 rt_dstmsg(RTM_MISS, dst, 0);
291 return (NULL);
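/*
 * Reference-count sketch (illustrative, assuming the rtlookup()/
 * rtpurelookup() wrappers declared in <net/route.h>): every successful
 * lookup hands back a route whose rt_refcnt has already been bumped,
 * so the caller owns exactly one reference and must release it:
 *
 *	struct rtentry *rt;
 *
 *	rt = rtlookup(dst);
 *	if (rt != NULL) {
 *		... examine rt ...
 *		rtfree(rt);
 *	}
 */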
294 void
295 rtfree(struct rtentry *rt)
297 if (rt->rt_cpuid == mycpuid)
298 rtfree_oncpu(rt);
299 else
300 rtfree_remote(rt);
303 void
304 rtfree_oncpu(struct rtentry *rt)
306 KKASSERT(rt->rt_cpuid == mycpuid);
307 KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
309 --rt->rt_refcnt;
310 if (rt->rt_refcnt == 0) {
311 struct radix_node_head *rnh =
312 rt_tables[mycpuid][rt_key(rt)->sa_family];
314 if (rnh->rnh_close)
315 rnh->rnh_close((struct radix_node *)rt, rnh);
316 if (!(rt->rt_flags & RTF_UP)) {
317 /* deallocate route */
318 if (rt->rt_ifa != NULL)
319 IFAFREE(rt->rt_ifa);
320 if (rt->rt_parent != NULL)
321 RTFREE(rt->rt_parent); /* recursive call! */
322 Free(rt_key(rt));
323 Free(rt);
328 static void
329 rtfree_async_dispatch(netmsg_t msg)
331 struct rtentry *rt = msg->lmsg.u.ms_resultp;
333 rtfree_oncpu(rt);
334 netisr_replymsg(&msg->base, 0);
337 void
338 rtfree_async(struct rtentry *rt)
340 struct netmsg_base *msg;
342 if (IS_NETISR(curthread, rt->rt_cpuid)) {
343 rtfree_oncpu(rt);
344 return;
347 KASSERT(rt->rt_refcnt > 0,
348 ("rtfree_async: rt_refcnt %ld", rt->rt_refcnt));
350 msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
351 netmsg_init(msg, NULL, &netisr_afree_rport, 0, rtfree_async_dispatch);
352 msg->lmsg.u.ms_resultp = rt;
354 netisr_sendmsg(msg, rt->rt_cpuid);
357 void
358 rtfree_remote(struct rtentry *rt)
361 KKASSERT(rt->rt_cpuid != mycpuid);
363 if (route_assert_owner_access) {
364 panic("rt remote free rt_cpuid %d, mycpuid %d",
365 rt->rt_cpuid, mycpuid);
366 } else {
367 kprintf("rt remote free rt_cpuid %d, mycpuid %d\n",
368 rt->rt_cpuid, mycpuid);
369 print_backtrace(-1);
371 rtfree_async(rt);
375 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
376 struct sockaddr *netmask, int flags, struct sockaddr *src)
378 struct rtentry *rt = NULL;
379 struct rt_addrinfo rtinfo;
380 struct ifaddr *ifa;
381 u_long *stat = NULL;
382 int error;
384 /* verify the gateway is directly reachable */
385 if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
386 error = ENETUNREACH;
387 goto out;
391 * If the redirect isn't from our current router for this destination,
392 * it's either old or wrong.
394 if (!(flags & RTF_DONE) && /* XXX JH */
395 (rt = rtpurelookup(dst)) != NULL &&
396 (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
397 error = EINVAL;
398 goto done;
402 * If it redirects us to ourselves, we have a routing loop,
403 * perhaps as a result of an interface going down recently.
405 if (ifa_ifwithaddr(gateway)) {
406 error = EHOSTUNREACH;
407 goto done;
411 * Create a new entry if the lookup failed or if we got back
412 * a wildcard entry for the default route. This is necessary
413 * for hosts which use routing redirects generated by smart
414 * gateways to dynamically build the routing tables.
416 if (rt == NULL)
417 goto create;
418 if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
419 rtfree(rt);
420 goto create;
423 /* Ignore redirects for directly connected hosts. */
424 if (!(rt->rt_flags & RTF_GATEWAY)) {
425 error = EHOSTUNREACH;
426 goto done;
429 if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
431 * Changing from a network route to a host route.
432 * Create a new host route rather than smashing the
433 * network route.
435 create:
436 flags |= RTF_GATEWAY | RTF_DYNAMIC;
437 bzero(&rtinfo, sizeof(struct rt_addrinfo));
438 rtinfo.rti_info[RTAX_DST] = dst;
439 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
440 rtinfo.rti_info[RTAX_NETMASK] = netmask;
441 rtinfo.rti_flags = flags;
442 rtinfo.rti_ifa = ifa;
443 rt = NULL; /* copy-in/copy-out parameter */
444 error = rtrequest1(RTM_ADD, &rtinfo, &rt);
445 if (rt != NULL)
446 flags = rt->rt_flags;
447 stat = &rtstat.rts_dynamic;
448 } else {
450 * Smash the current notion of the gateway to this destination.
451 * Should check about netmask!!!
453 rt->rt_flags |= RTF_MODIFIED;
454 flags |= RTF_MODIFIED;
456 /* We only need to report rtmsg on CPU0 */
457 rt_setgate(rt, rt_key(rt), gateway,
458 mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
459 error = 0;
460 stat = &rtstat.rts_newgateway;
463 done:
464 if (rt != NULL)
465 rtfree(rt);
466 out:
467 if (error != 0)
468 rtstat.rts_badredirect++;
469 else if (stat != NULL)
470 (*stat)++;
472 return error;
475 struct netmsg_rtredirect {
476 struct netmsg_base base;
477 struct sockaddr *dst;
478 struct sockaddr *gateway;
479 struct sockaddr *netmask;
480 int flags;
481 struct sockaddr *src;
485 * Force a routing table entry to the specified
486 * destination to go through the given gateway.
487 * Normally called as a result of a routing redirect
488 * message from the network layer.
490 * N.B.: must be called at splnet
492 void
493 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
494 struct sockaddr *netmask, int flags, struct sockaddr *src)
496 struct rt_addrinfo rtinfo;
497 int error;
498 struct netmsg_rtredirect msg;
500 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
501 0, rtredirect_msghandler);
502 msg.dst = dst;
503 msg.gateway = gateway;
504 msg.netmask = netmask;
505 msg.flags = flags;
506 msg.src = src;
507 error = rt_domsg_global(&msg.base);
508 bzero(&rtinfo, sizeof(struct rt_addrinfo));
509 rtinfo.rti_info[RTAX_DST] = dst;
510 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
511 rtinfo.rti_info[RTAX_NETMASK] = netmask;
512 rtinfo.rti_info[RTAX_AUTHOR] = src;
513 rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
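/*
 * Call sketch (illustrative, not taken from this file): a protocol
 * that receives a redirect, e.g. ICMP, typically hands the addresses
 * straight to rtredirect() and lets it update every cpu's table.  The
 * sockaddr_in variables below are placeholders for addresses parsed
 * out of the redirect message.
 *
 *	rtredirect((struct sockaddr *)&dst_sin,
 *		   (struct sockaddr *)&new_gw_sin,
 *		   NULL, RTF_GATEWAY | RTF_HOST,
 *		   (struct sockaddr *)&src_router_sin);
 */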
516 static void
517 rtredirect_msghandler(netmsg_t msg)
519 struct netmsg_rtredirect *rmsg = (void *)msg;
520 int nextcpu;
522 rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
523 rmsg->flags, rmsg->src);
524 nextcpu = mycpuid + 1;
525 if (nextcpu < ncpus)
526 lwkt_forwardmsg(netisr_cpuport(nextcpu), &msg->lmsg);
527 else
528 lwkt_replymsg(&msg->lmsg, 0);
532 * Routing table ioctl interface.
535 rtioctl(u_long req, caddr_t data, struct ucred *cred)
537 #ifdef INET
538 /* Multicast goop, grrr... */
539 return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
540 #else
541 return ENXIO;
542 #endif
545 struct ifaddr *
546 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
548 struct ifaddr *ifa;
550 if (!(flags & RTF_GATEWAY)) {
552 * If we are adding a route to an interface,
553 * and the interface is a point-to-point link,
554 * we should search for the destination
555 * as our clue to the interface. Otherwise
556 * we can use the local address.
558 ifa = NULL;
559 if (flags & RTF_HOST) {
560 ifa = ifa_ifwithdstaddr(dst);
562 if (ifa == NULL)
563 ifa = ifa_ifwithaddr(gateway);
564 } else {
566 * If we are adding a route to a remote net
567 * or host, the gateway may still be on the
568 * other end of a pt to pt link.
570 ifa = ifa_ifwithdstaddr(gateway);
572 if (ifa == NULL)
573 ifa = ifa_ifwithnet(gateway);
574 if (ifa == NULL) {
575 struct rtentry *rt;
577 rt = rtpurelookup(gateway);
578 if (rt == NULL)
579 return (NULL);
580 rt->rt_refcnt--;
581 if ((ifa = rt->rt_ifa) == NULL)
582 return (NULL);
584 if (ifa->ifa_addr->sa_family != dst->sa_family) {
585 struct ifaddr *oldifa = ifa;
587 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
588 if (ifa == NULL)
589 ifa = oldifa;
591 return (ifa);
594 static int rt_fixdelete (struct radix_node *, void *);
595 static int rt_fixchange (struct radix_node *, void *);
597 struct rtfc_arg {
598 struct rtentry *rt0;
599 struct radix_node_head *rnh;
603 * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
606 rt_getifa(struct rt_addrinfo *rtinfo)
608 struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
609 struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
610 struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
611 int flags = rtinfo->rti_flags;
614 * ifp may be specified by sockaddr_dl
615 * when protocol address is ambiguous.
617 if (rtinfo->rti_ifp == NULL) {
618 struct sockaddr *ifpaddr;
620 ifpaddr = rtinfo->rti_info[RTAX_IFP];
621 if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
622 struct ifaddr *ifa;
624 ifa = ifa_ifwithnet(ifpaddr);
625 if (ifa != NULL)
626 rtinfo->rti_ifp = ifa->ifa_ifp;
630 if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
631 rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
632 if (rtinfo->rti_ifa == NULL) {
633 struct sockaddr *sa;
635 sa = ifaaddr != NULL ? ifaaddr :
636 (gateway != NULL ? gateway : dst);
637 if (sa != NULL && rtinfo->rti_ifp != NULL)
638 rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
639 else if (dst != NULL && gateway != NULL)
640 rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
641 else if (sa != NULL)
642 rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
644 if (rtinfo->rti_ifa == NULL)
645 return (ENETUNREACH);
647 if (rtinfo->rti_ifp == NULL)
648 rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
649 return (0);
653 * Do appropriate manipulations of a routing tree given
654 * all the bits of info needed
657 rtrequest(
658 int req,
659 struct sockaddr *dst,
660 struct sockaddr *gateway,
661 struct sockaddr *netmask,
662 int flags,
663 struct rtentry **ret_nrt)
665 struct rt_addrinfo rtinfo;
667 bzero(&rtinfo, sizeof(struct rt_addrinfo));
668 rtinfo.rti_info[RTAX_DST] = dst;
669 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
670 rtinfo.rti_info[RTAX_NETMASK] = netmask;
671 rtinfo.rti_flags = flags;
672 return rtrequest1(req, &rtinfo, ret_nrt);
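/*
 * Usage sketch (illustrative only): adding a static host route through
 * a gateway to the current cpu's table via this legacy-style wrapper;
 * dst_sin and gw_sin are placeholder sockaddr_in variables.  Note that
 * when ret_nrt is non-NULL the returned route carries an extra
 * reference that the caller must drop.
 *
 *	struct rtentry *rt = NULL;
 *	int error;
 *
 *	error = rtrequest(RTM_ADD, (struct sockaddr *)&dst_sin,
 *			  (struct sockaddr *)&gw_sin, NULL,
 *			  RTF_HOST | RTF_GATEWAY | RTF_STATIC, &rt);
 *	if (error == 0 && rt != NULL)
 *		rtfree(rt);
 *
 * Changes that must be replicated on every cpu go through
 * rtrequest_global()/rtrequest1_global() instead.
 */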
676 rtrequest_global(
677 int req,
678 struct sockaddr *dst,
679 struct sockaddr *gateway,
680 struct sockaddr *netmask,
681 int flags)
683 struct rt_addrinfo rtinfo;
685 bzero(&rtinfo, sizeof(struct rt_addrinfo));
686 rtinfo.rti_info[RTAX_DST] = dst;
687 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
688 rtinfo.rti_info[RTAX_NETMASK] = netmask;
689 rtinfo.rti_flags = flags;
690 return rtrequest1_global(req, &rtinfo, NULL, NULL, RTREQ_PRIO_NORM);
693 struct netmsg_rtq {
694 struct netmsg_base base;
695 int req;
696 struct rt_addrinfo *rtinfo;
697 rtrequest1_callback_func_t callback;
698 void *arg;
702 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
703 rtrequest1_callback_func_t callback, void *arg, boolean_t req_prio)
705 int error, flags = 0;
706 struct netmsg_rtq msg;
708 if (req_prio)
709 flags = MSGF_PRIORITY;
710 netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
711 rtrequest1_msghandler);
712 msg.base.lmsg.ms_error = -1;
713 msg.req = req;
714 msg.rtinfo = rtinfo;
715 msg.callback = callback;
716 msg.arg = arg;
717 error = rt_domsg_global(&msg.base);
718 return (error);
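/*
 * Callback sketch (illustrative; example_callback is hypothetical):
 * rtrequest1_global() runs the request on every cpu and invokes the
 * optional callback once per cpu with that cpu's resulting rtentry,
 * which is how rtinit() hooks in rtinit_rtrequest_callback() below.
 *
 *	static void
 *	example_callback(int cmd, int error, struct rt_addrinfo *rtinfo,
 *	    struct rtentry *rt, void *arg)
 *	{
 *		if (error == 0 && rt != NULL && mycpuid == 0)
 *			kprintf("cpu0 installed route %p\n", rt);
 *	}
 *
 *	error = rtrequest1_global(RTM_ADD, &rtinfo, example_callback,
 *				  NULL, RTREQ_PRIO_NORM);
 */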
 722  * Handle a route table request on the current cpu. Since the route tables
 723  * are supposed to be identical on each cpu, an error occurring later in the
 724  * message chain is considered system-fatal.
726 static void
727 rtrequest1_msghandler(netmsg_t msg)
729 struct netmsg_rtq *rmsg = (void *)msg;
730 struct rt_addrinfo rtinfo;
731 struct rtentry *rt = NULL;
732 int nextcpu;
733 int error;
 736  * Copy the rtinfo. We need to make sure that the original
 737  * rtinfo in the netmsg, which is set up by the caller, will
 738  * _not_ be changed; else the next CPU on the netmsg forwarding
 739  * path will see a different rtinfo than what this CPU has seen.
741 rtinfo = *rmsg->rtinfo;
743 error = rtrequest1(rmsg->req, &rtinfo, &rt);
744 if (rt)
745 --rt->rt_refcnt;
746 if (rmsg->callback)
747 rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);
 750  * RTM_DELETE's are propagated even if an error occurs, since a
751 * cloned route might be undergoing deletion and cloned routes
752 * are not necessarily replicated. An overall error is returned
753 * only if no cpus have the route in question.
755 if (rmsg->base.lmsg.ms_error < 0 || error == 0)
756 rmsg->base.lmsg.ms_error = error;
758 nextcpu = mycpuid + 1;
759 if (error && rmsg->req != RTM_DELETE) {
760 if (mycpuid != 0) {
761 panic("rtrequest1_msghandler: rtrequest table req %d, "
762 "failed on cpu%d, error %d\n",
763 rmsg->req, mycpuid, error);
765 lwkt_replymsg(&rmsg->base.lmsg, error);
766 } else if (nextcpu < ncpus) {
767 lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
768 } else {
769 lwkt_replymsg(&rmsg->base.lmsg, rmsg->base.lmsg.ms_error);
774 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
776 struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
777 struct rtentry *rt;
778 struct radix_node *rn;
779 struct radix_node_head *rnh;
780 struct ifaddr *ifa;
781 struct sockaddr *ndst;
782 boolean_t reportmsg;
783 int error = 0;
785 #define gotoerr(x) { error = x ; goto bad; }
787 #ifdef ROUTE_DEBUG
788 if (route_debug)
789 rt_addrinfo_print(req, rtinfo);
790 #endif
792 crit_enter();
794 * Find the correct routing tree to use for this Address Family
796 if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
797 gotoerr(EAFNOSUPPORT);
800 * If we are adding a host route then we don't want to put
801 * a netmask in the tree, nor do we want to clone it.
803 if (rtinfo->rti_flags & RTF_HOST) {
804 rtinfo->rti_info[RTAX_NETMASK] = NULL;
805 rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
808 switch (req) {
809 case RTM_DELETE:
810 /* Remove the item from the tree. */
811 rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
812 (char *)rtinfo->rti_info[RTAX_NETMASK],
813 rnh);
814 if (rn == NULL)
815 gotoerr(ESRCH);
816 KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
817 ("rnh_deladdr returned flags 0x%x", rn->rn_flags));
818 rt = (struct rtentry *)rn;
820 /* ref to prevent a deletion race */
821 ++rt->rt_refcnt;
823 /* Free any routes cloned from this one. */
824 if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
825 rt_mask(rt) != NULL) {
826 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
827 (char *)rt_mask(rt),
828 rt_fixdelete, rt);
831 if (rt->rt_gwroute != NULL) {
832 RTFREE(rt->rt_gwroute);
833 rt->rt_gwroute = NULL;
837 * NB: RTF_UP must be set during the search above,
838 * because we might delete the last ref, causing
839 * rt to get freed prematurely.
841 rt->rt_flags &= ~RTF_UP;
843 #ifdef ROUTE_DEBUG
844 if (route_debug)
845 rt_print(rtinfo, rt);
846 #endif
848 /* Give the protocol a chance to keep things in sync. */
849 if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
850 ifa->ifa_rtrequest(RTM_DELETE, rt);
853 * If the caller wants it, then it can have it,
854 * but it's up to it to free the rtentry as we won't be
855 * doing it.
857 KASSERT(rt->rt_refcnt >= 0,
858 ("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
859 if (ret_nrt != NULL) {
860 /* leave ref intact for return */
861 *ret_nrt = rt;
862 } else {
863 /* deref / attempt to destroy */
864 rtfree(rt);
866 break;
868 case RTM_RESOLVE:
869 if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
870 gotoerr(EINVAL);
872 KASSERT(rt->rt_cpuid == mycpuid,
873 ("rt resolve rt_cpuid %d, mycpuid %d",
874 rt->rt_cpuid, mycpuid));
876 ifa = rt->rt_ifa;
877 rtinfo->rti_flags =
878 rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
879 rtinfo->rti_flags |= RTF_WASCLONED;
880 rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
881 if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
882 rtinfo->rti_flags |= RTF_HOST;
883 rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
884 rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
885 rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
886 goto makeroute;
888 case RTM_ADD:
889 KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
890 rtinfo->rti_info[RTAX_GATEWAY] != NULL,
891 ("rtrequest: GATEWAY but no gateway"));
893 if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
894 gotoerr(error);
895 ifa = rtinfo->rti_ifa;
896 makeroute:
897 R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
898 if (rt == NULL) {
899 if (req == RTM_ADD) {
900 kprintf("rtrequest1: alloc rtentry failed on "
901 "cpu%d\n", mycpuid);
903 gotoerr(ENOBUFS);
905 bzero(rt, sizeof(struct rtentry));
906 rt->rt_flags = RTF_UP | rtinfo->rti_flags;
907 rt->rt_cpuid = mycpuid;
909 if (mycpuid != 0 && req == RTM_ADD) {
910 /* For RTM_ADD, we have already sent rtmsg on CPU0. */
911 reportmsg = RTL_DONTREPORT;
912 } else {
914 * For RTM_ADD, we only send rtmsg on CPU0.
915 * For RTM_RESOLVE, we always send rtmsg. XXX
917 reportmsg = RTL_REPORTMSG;
919 error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
920 reportmsg);
921 if (error != 0) {
922 Free(rt);
923 gotoerr(error);
926 ndst = rt_key(rt);
927 if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
928 rt_maskedcopy(dst, ndst,
929 rtinfo->rti_info[RTAX_NETMASK]);
930 else
931 bcopy(dst, ndst, dst->sa_len);
933 if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
934 rt_setshims(rt, rtinfo->rti_info);
937 * Note that we now have a reference to the ifa.
938 * This moved from below so that rnh->rnh_addaddr() can
939 * examine the ifa and ifa->ifa_ifp if it so desires.
941 IFAREF(ifa);
942 rt->rt_ifa = ifa;
943 rt->rt_ifp = ifa->ifa_ifp;
944 /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
946 rn = rnh->rnh_addaddr((char *)ndst,
947 (char *)rtinfo->rti_info[RTAX_NETMASK],
948 rnh, rt->rt_nodes);
949 if (rn == NULL) {
950 struct rtentry *oldrt;
953 * We already have one of these in the tree.
954 * We do a special hack: if the old route was
955 * cloned, then we blow it away and try
956 * re-inserting the new one.
958 oldrt = rtpurelookup(ndst);
959 if (oldrt != NULL) {
960 --oldrt->rt_refcnt;
961 if (oldrt->rt_flags & RTF_WASCLONED) {
962 rtrequest(RTM_DELETE, rt_key(oldrt),
963 oldrt->rt_gateway,
964 rt_mask(oldrt),
965 oldrt->rt_flags, NULL);
966 rn = rnh->rnh_addaddr((char *)ndst,
967 (char *)
968 rtinfo->rti_info[RTAX_NETMASK],
969 rnh, rt->rt_nodes);
973 /* NOTE: rt_ifa may have been changed */
974 ifa = rt->rt_ifa;
977 * If it still failed to go into the tree,
978 * then un-make it (this should be a function).
980 if (rn == NULL) {
981 if (rt->rt_gwroute != NULL)
982 rtfree(rt->rt_gwroute);
983 IFAFREE(ifa);
984 Free(rt_key(rt));
985 Free(rt);
986 gotoerr(EEXIST);
990 * If we got here from RESOLVE, then we are cloning
991 * so clone the rest, and note that we
992 * are a clone (and increment the parent's references)
994 if (req == RTM_RESOLVE) {
995 rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
996 rt->rt_rmx.rmx_pksent = 0; /* reset packet counter */
997 if ((*ret_nrt)->rt_flags &
998 (RTF_CLONING | RTF_PRCLONING)) {
999 rt->rt_parent = *ret_nrt;
1000 (*ret_nrt)->rt_refcnt++;
1005 * if this protocol has something to add to this then
1006 * allow it to do that as well.
1008 if (ifa->ifa_rtrequest != NULL)
1009 ifa->ifa_rtrequest(req, rt);
1012  * We repeat the same procedure from rt_setgate() here because
1013  * it does not fire there: the node had not yet been added to
1014  * the tree when rt_setgate() was called.
1016 if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
1017 rt_mask(rt) != NULL) {
1018 struct rtfc_arg arg = { rt, rnh };
1020 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1021 (char *)rt_mask(rt),
1022 rt_fixchange, &arg);
1025 #ifdef ROUTE_DEBUG
1026 if (route_debug)
1027 rt_print(rtinfo, rt);
1028 #endif
1030 * Return the resulting rtentry,
1031 * increasing the number of references by one.
1033 if (ret_nrt != NULL) {
1034 rt->rt_refcnt++;
1035 *ret_nrt = rt;
1037 break;
1038 default:
1039 error = EOPNOTSUPP;
1041 bad:
1042 #ifdef ROUTE_DEBUG
1043 if (route_debug) {
1044 if (error)
1045 kprintf("rti %p failed error %d\n", rtinfo, error);
1046 else
1047 kprintf("rti %p succeeded\n", rtinfo);
1049 #endif
1050 crit_exit();
1051 return (error);
1055 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1056 * (i.e., the routes related to it by the operation of cloning). This
1057 * routine is iterated over all potential former-child-routes by way of
1058 * rnh->rnh_walktree_from() above, and those that actually are children of
1059 * the late parent (passed in as VP here) are themselves deleted.
1061 static int
1062 rt_fixdelete(struct radix_node *rn, void *vp)
1064 struct rtentry *rt = (struct rtentry *)rn;
1065 struct rtentry *rt0 = vp;
1067 if (rt->rt_parent == rt0 &&
1068 !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1069 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1070 rt->rt_flags, NULL);
1072 return 0;
1076 * This routine is called from rt_setgate() to do the analogous thing for
1077 * adds and changes. There is the added complication in this case of a
1078 * middle insert; i.e., insertion of a new network route between an older
1079 * network route and (cloned) host routes. For this reason, a simple check
1080 * of rt->rt_parent is insufficient; each candidate route must be tested
1081 * against the (mask, value) of the new route (passed as before in vp)
1082 * to see if the new route matches it.
1084 * XXX - it may be possible to do fixdelete() for changes and reserve this
1085 * routine just for adds. I'm not sure why I thought it was necessary to do
1086 * changes this way.
1088 #ifdef DEBUG
1089 static int rtfcdebug = 0;
1090 #endif
1092 static int
1093 rt_fixchange(struct radix_node *rn, void *vp)
1095 struct rtentry *rt = (struct rtentry *)rn;
1096 struct rtfc_arg *ap = vp;
1097 struct rtentry *rt0 = ap->rt0;
1098 struct radix_node_head *rnh = ap->rnh;
1099 u_char *xk1, *xm1, *xk2, *xmp;
1100 int i, len, mlen;
1102 #ifdef DEBUG
1103 if (rtfcdebug)
1104 kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1105 #endif
1107 if (rt->rt_parent == NULL ||
1108 (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1109 #ifdef DEBUG
1110 if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1111 #endif
1112 return 0;
1115 if (rt->rt_parent == rt0) {
1116 #ifdef DEBUG
1117 if (rtfcdebug) kprintf("parent match\n");
1118 #endif
1119 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1120 rt->rt_flags, NULL);
1124 * There probably is a function somewhere which does this...
1125 * if not, there should be.
1127 len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1129 xk1 = (u_char *)rt_key(rt0);
1130 xm1 = (u_char *)rt_mask(rt0);
1131 xk2 = (u_char *)rt_key(rt);
1133 /* avoid applying a less specific route */
1134 xmp = (u_char *)rt_mask(rt->rt_parent);
1135 mlen = rt_key(rt->rt_parent)->sa_len;
1136 if (mlen > rt_key(rt0)->sa_len) {
1137 #ifdef DEBUG
1138 if (rtfcdebug)
1139 kprintf("rt_fixchange: inserting a less "
1140 "specific route\n");
1141 #endif
1142 return 0;
1144 for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1145 if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1146 #ifdef DEBUG
1147 if (rtfcdebug)
1148 kprintf("rt_fixchange: inserting a less "
1149 "specific route\n");
1150 #endif
1151 return 0;
1155 for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1156 if ((xk2[i] & xm1[i]) != xk1[i]) {
1157 #ifdef DEBUG
1158 if (rtfcdebug) kprintf("no match\n");
1159 #endif
1160 return 0;
1165 * OK, this node is a clone, and matches the node currently being
1166 * changed/added under the node's mask. So, get rid of it.
1168 #ifdef DEBUG
1169 if (rtfcdebug) kprintf("deleting\n");
1170 #endif
1171 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1172 rt->rt_flags, NULL);
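/*
 * Worked example (illustrative): assume 10.0.0.0/8 is a cloning net
 * route with a cloned host route for 10.1.2.3 hanging off it, and
 * 10.1.0.0/16 is now being added.  The host route's parent is still
 * the /8, so the parent test above does not fire; instead the mask
 * comparison sees that 10.1.2.3 & 255.255.0.0 equals the new key
 * 10.1.0.0, so the stale clone is deleted and will be re-cloned from
 * the more specific /16 on its next use.
 */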
1176 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1177 boolean_t generate_report)
1179 char *space, *oldspace;
1180 int dlen = RT_ROUNDUP(dst->sa_len), glen = RT_ROUNDUP(gate->sa_len);
1181 struct rtentry *rt = rt0;
1182 struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1185 * A host route with the destination equal to the gateway
1186 * will interfere with keeping LLINFO in the routing
1187 * table, so disallow it.
1189 if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1190 (RTF_HOST | RTF_GATEWAY)) &&
1191 dst->sa_len == gate->sa_len &&
1192 sa_equal(dst, gate)) {
1194 * The route might already exist if this is an RTM_CHANGE
1195 * or a routing redirect, so try to delete it.
1197 if (rt_key(rt0) != NULL)
1198 rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1199 rt_mask(rt0), rt0->rt_flags, NULL);
1200 return EADDRNOTAVAIL;
1204 * Both dst and gateway are stored in the same malloc'ed chunk
1205 * (If I ever get my hands on....)
1206 * if we need to malloc a new chunk, then keep the old one around
1207 * till we don't need it any more.
1209 if (rt->rt_gateway == NULL ||
1210 glen > RT_ROUNDUP(rt->rt_gateway->sa_len)) {
1211 oldspace = (char *)rt_key(rt);
1212 R_Malloc(space, char *, dlen + glen);
1213 if (space == NULL)
1214 return ENOBUFS;
1215 rt->rt_nodes->rn_key = space;
1216 } else {
1217 space = (char *)rt_key(rt); /* Just use the old space. */
1218 oldspace = NULL;
1221 /* Set the gateway value. */
1222 rt->rt_gateway = (struct sockaddr *)(space + dlen);
1223 bcopy(gate, rt->rt_gateway, glen);
1225 if (oldspace != NULL) {
1227 * If we allocated a new chunk, preserve the original dst.
1228 * This way, rt_setgate() really just sets the gate
1229 * and leaves the dst field alone.
1231 bcopy(dst, space, dlen);
1232 Free(oldspace);
1236 * If there is already a gwroute, it's now almost definitely wrong
1237 * so drop it.
1239 if (rt->rt_gwroute != NULL) {
1240 RTFREE(rt->rt_gwroute);
1241 rt->rt_gwroute = NULL;
1243 if (rt->rt_flags & RTF_GATEWAY) {
1245 * Cloning loop avoidance: In the presence of
1246 * protocol-cloning and bad configuration, it is
1247 * possible to get stuck in bottomless mutual recursion
1248 * (rtrequest rt_setgate rtlookup). We avoid this
1249 * by not allowing protocol-cloning to operate for
1250 * gateways (which is probably the correct choice
1251 * anyway), and avoid the resulting reference loops
1252 * by disallowing any route to run through itself as
1253 * a gateway. This is obviously mandatory when we
1254 * get rt->rt_output().
1256 * This breaks TTCP for hosts outside the gateway! XXX JH
1258 rt->rt_gwroute = _rtlookup(gate, generate_report,
1259 RTF_PRCLONING);
1260 if (rt->rt_gwroute == rt) {
1261 rt->rt_gwroute = NULL;
1262 --rt->rt_refcnt;
1263 return EDQUOT; /* failure */
1268 * This isn't going to do anything useful for host routes, so
1269 * don't bother. Also make sure we have a reasonable mask
1270 * (we don't yet have one during adds).
1272 if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1273 struct rtfc_arg arg = { rt, rnh };
1275 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1276 (char *)rt_mask(rt),
1277 rt_fixchange, &arg);
1280 return 0;
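/*
 * Layout sketch: rt_key(rt) and rt->rt_gateway share one allocation;
 * both sockaddrs are rounded up with RT_ROUNDUP(), so after a
 * successful rt_setgate() the chunk looks like:
 *
 *	space: [ dst, dlen bytes ][ gateway, glen bytes ]
 *	        ^ rt_key(rt)       ^ rt->rt_gateway == space + dlen
 */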
1283 static void
1284 rt_maskedcopy(
1285 struct sockaddr *src,
1286 struct sockaddr *dst,
1287 struct sockaddr *netmask)
1289 u_char *cp1 = (u_char *)src;
1290 u_char *cp2 = (u_char *)dst;
1291 u_char *cp3 = (u_char *)netmask;
1292 u_char *cplim = cp2 + *cp3;
1293 u_char *cplim2 = cp2 + *cp1;
1295 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1296 cp3 += 2;
1297 if (cplim > cplim2)
1298 cplim = cplim2;
1299 while (cp2 < cplim)
1300 *cp2++ = *cp1++ & *cp3++;
1301 if (cp2 < cplim2)
1302 bzero(cp2, cplim2 - cp2);
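/*
 * Example (illustrative): with src holding 10.1.2.3 and a netmask of
 * 255.255.0.0, rt_maskedcopy() copies sa_len and sa_family verbatim,
 * ANDs the following bytes with the mask for as long as the mask runs,
 * and zeroes the rest, so the address portion of dst comes out as
 * 10.1.0.0.
 */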
1306 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1308 struct rtentry *up_rt, *rt;
1310 if (!(rt0->rt_flags & RTF_UP)) {
1311 up_rt = rtlookup(dst);
1312 if (up_rt == NULL)
1313 return (EHOSTUNREACH);
1314 up_rt->rt_refcnt--;
1315 } else
1316 up_rt = rt0;
1317 if (up_rt->rt_flags & RTF_GATEWAY) {
1318 if (up_rt->rt_gwroute == NULL) {
1319 up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1320 if (up_rt->rt_gwroute == NULL)
1321 return (EHOSTUNREACH);
1322 } else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1323 rtfree(up_rt->rt_gwroute);
1324 up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1325 if (up_rt->rt_gwroute == NULL)
1326 return (EHOSTUNREACH);
1328 rt = up_rt->rt_gwroute;
1329 } else
1330 rt = up_rt;
1331 if (rt->rt_flags & RTF_REJECT &&
1332 (rt->rt_rmx.rmx_expire == 0 || /* rt doesn't expire */
1333 time_uptime < rt->rt_rmx.rmx_expire)) /* rt not expired */
1334 return (rt->rt_flags & RTF_HOST ? EHOSTDOWN : EHOSTUNREACH);
1335 *drt = rt;
1336 return 0;
1339 static int
1340 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim){
1341 int i;
1343 for (i=0; i<3; i++) {
1344 struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1345 int shimlen;
1347 if (shim == NULL)
1348 break;
1350 shimlen = RT_ROUNDUP(shim->sa_len);
1351 R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1352 bcopy(shim, rt->rt_shim[i], shimlen);
1355 return 0;
1358 #ifdef ROUTE_DEBUG
1361 * Print out a route table entry
1363 void
1364 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1366 kprintf("rti %p cpu %d route %p flags %08lx: ",
1367 rtinfo, mycpuid, rn, rn->rt_flags);
1368 sockaddr_print(rt_key(rn));
1369 kprintf(" mask ");
1370 sockaddr_print(rt_mask(rn));
1371 kprintf(" gw ");
1372 sockaddr_print(rn->rt_gateway);
1373 kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1374 kprintf(" ifa %p\n", rn->rt_ifa);
1377 void
1378 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1380 int didit = 0;
1381 int i;
1383 #ifdef ROUTE_DEBUG
1384 if (cmd == RTM_DELETE && route_debug > 1)
1385 print_backtrace(-1);
1386 #endif
1388 switch(cmd) {
1389 case RTM_ADD:
1390 kprintf("ADD ");
1391 break;
1392 case RTM_RESOLVE:
1393 kprintf("RES ");
1394 break;
1395 case RTM_DELETE:
1396 kprintf("DEL ");
1397 break;
1398 default:
1399 kprintf("C%02d ", cmd);
1400 break;
1402 kprintf("rti %p cpu %d ", rti, mycpuid);
1403 for (i = 0; i < rti->rti_addrs; ++i) {
1404 if (rti->rti_info[i] == NULL)
1405 continue;
1406 if (didit)
1407 kprintf(" ,");
1408 switch(i) {
1409 case RTAX_DST:
1410 kprintf("(DST ");
1411 break;
1412 case RTAX_GATEWAY:
1413 kprintf("(GWY ");
1414 break;
1415 case RTAX_NETMASK:
1416 kprintf("(MSK ");
1417 break;
1418 case RTAX_GENMASK:
1419 kprintf("(GEN ");
1420 break;
1421 case RTAX_IFP:
1422 kprintf("(IFP ");
1423 break;
1424 case RTAX_IFA:
1425 kprintf("(IFA ");
1426 break;
1427 case RTAX_AUTHOR:
1428 kprintf("(AUT ");
1429 break;
1430 case RTAX_BRD:
1431 kprintf("(BRD ");
1432 break;
1433 default:
1434 kprintf("(?%02d ", i);
1435 break;
1437 sockaddr_print(rti->rti_info[i]);
1438 kprintf(")");
1439 didit = 1;
1441 kprintf("\n");
1444 void
1445 sockaddr_print(struct sockaddr *sa)
1447 struct sockaddr_in *sa4;
1448 struct sockaddr_in6 *sa6;
1449 int len;
1450 int i;
1452 if (sa == NULL) {
1453 kprintf("NULL");
1454 return;
1457 len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1459 switch(sa->sa_family) {
1460 case AF_INET:
1461 case AF_INET6:
1462 default:
1463 switch(sa->sa_family) {
1464 case AF_INET:
1465 sa4 = (struct sockaddr_in *)sa;
1466 kprintf("INET %d %d.%d.%d.%d",
1467 ntohs(sa4->sin_port),
1468 (ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1469 (ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1470 (ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1471 (ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1473 break;
1474 case AF_INET6:
1475 sa6 = (struct sockaddr_in6 *)sa;
1476 kprintf("INET6 %d %04x:%04x%04x:%04x:%04x:%04x:%04x:%04x",
1477 ntohs(sa6->sin6_port),
1478 sa6->sin6_addr.s6_addr16[0],
1479 sa6->sin6_addr.s6_addr16[1],
1480 sa6->sin6_addr.s6_addr16[2],
1481 sa6->sin6_addr.s6_addr16[3],
1482 sa6->sin6_addr.s6_addr16[4],
1483 sa6->sin6_addr.s6_addr16[5],
1484 sa6->sin6_addr.s6_addr16[6],
1485 sa6->sin6_addr.s6_addr16[7]
1487 break;
1488 default:
1489 kprintf("AF%d ", sa->sa_family);
1490 while (len > 0 && sa->sa_data[len-1] == 0)
1491 --len;
1493 for (i = 0; i < len; ++i) {
1494 if (i)
1495 kprintf(".");
1496 kprintf("%d", (unsigned char)sa->sa_data[i]);
1498 break;
1503 #endif
1506 * Set up a routing table entry, normally for an interface.
1509 rtinit(struct ifaddr *ifa, int cmd, int flags)
1511 struct sockaddr *dst, *deldst, *netmask;
1512 struct mbuf *m = NULL;
1513 struct radix_node_head *rnh;
1514 struct radix_node *rn;
1515 struct rt_addrinfo rtinfo;
1516 int error;
1518 if (flags & RTF_HOST) {
1519 dst = ifa->ifa_dstaddr;
1520 netmask = NULL;
1521 } else {
1522 dst = ifa->ifa_addr;
1523 netmask = ifa->ifa_netmask;
1526 * If it's a delete, check that if it exists, it's on the correct
1527 * interface or we might scrub a route to another ifa which would
1528 * be confusing at best and possibly worse.
1530 if (cmd == RTM_DELETE) {
1532 * It's a delete, so it should already exist..
1533 * If it's a net, mask off the host bits
1534 * (Assuming we have a mask)
1536 if (netmask != NULL) {
1537 m = m_get(M_NOWAIT, MT_SONAME);
1538 if (m == NULL)
1539 return (ENOBUFS);
1540 mbuftrackid(m, 34);
1541 deldst = mtod(m, struct sockaddr *);
1542 rt_maskedcopy(dst, deldst, netmask);
1543 dst = deldst;
1546 * Look up an rtentry that is in the routing tree and
1547 * contains the correct info.
1549 if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1550 (rn = rnh->rnh_lookup((char *)dst,
1551 (char *)netmask, rnh)) == NULL ||
1552 ((struct rtentry *)rn)->rt_ifa != ifa ||
1553 !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1554 if (m != NULL)
1555 m_free(m);
1556 return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1558 /* XXX */
1559 #if 0
1560 else {
1562 * One would think that as we are deleting, and we know
1563 * it doesn't exist, we could just return at this point
1564 * with an "ELSE" clause, but apparently not..
1566 return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1568 #endif
1571 * Do the actual request
1573 bzero(&rtinfo, sizeof(struct rt_addrinfo));
1574 rtinfo.rti_info[RTAX_DST] = dst;
1575 rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1576 rtinfo.rti_info[RTAX_NETMASK] = netmask;
1577 rtinfo.rti_flags = flags | ifa->ifa_flags;
1578 rtinfo.rti_ifa = ifa;
1579 error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa,
1580 RTREQ_PRIO_HIGH);
1581 if (m != NULL)
1582 m_free(m);
1583 return (error);
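/*
 * Call sketch (illustrative, not taken from this file): address
 * configuration code typically installs and tears down the interface
 * route roughly like this, with "flags" carrying RTF_HOST for a
 * point-to-point destination or 0 for a network route:
 *
 *	error = rtinit(ifa, RTM_ADD, RTF_UP | flags);
 *	...
 *	rtinit(ifa, RTM_DELETE, 0);
 */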
1586 static void
1587 rtinit_rtrequest_callback(int cmd, int error,
1588 struct rt_addrinfo *rtinfo, struct rtentry *rt,
1589 void *arg)
1591 struct ifaddr *ifa = arg;
1593 if (error == 0 && rt) {
1594 if (mycpuid == 0) {
1595 ++rt->rt_refcnt;
1596 rt_newaddrmsg(cmd, ifa, error, rt);
1597 --rt->rt_refcnt;
1599 if (cmd == RTM_DELETE) {
1600 if (rt->rt_refcnt == 0) {
1601 ++rt->rt_refcnt;
1602 rtfree(rt);
1608 struct netmsg_rts {
1609 struct netmsg_base base;
1610 int req;
1611 struct rt_addrinfo *rtinfo;
1612 rtsearch_callback_func_t callback;
1613 void *arg;
1614 boolean_t exact_match;
1615 int found_cnt;
1619 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1620 rtsearch_callback_func_t callback, void *arg, boolean_t exact_match,
1621 boolean_t req_prio)
1623 struct netmsg_rts msg;
1624 int flags = 0;
1626 if (req_prio)
1627 flags = MSGF_PRIORITY;
1628 netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
1629 rtsearch_msghandler);
1630 msg.req = req;
1631 msg.rtinfo = rtinfo;
1632 msg.callback = callback;
1633 msg.arg = arg;
1634 msg.exact_match = exact_match;
1635 msg.found_cnt = 0;
1636 return rt_domsg_global(&msg.base);
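/*
 * Usage sketch (illustrative; example_search_cb is hypothetical):
 * callers such as the routing-socket request paths search every cpu's
 * table and get called back once per cpu on which the route is found.
 * Returning 0 keeps the search going; returning EJUSTRETURN stops the
 * chain cleanly.
 *
 *	static int
 *	example_search_cb(int req, struct rt_addrinfo *rtinfo,
 *	    struct rtentry *rt, void *arg, int found_cnt)
 *	{
 *		return 0;
 *	}
 *
 *	error = rtsearch_global(RTM_GET, &rtinfo, example_search_cb,
 *				NULL, TRUE, RTREQ_PRIO_NORM);
 */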
1639 static void
1640 rtsearch_msghandler(netmsg_t msg)
1642 struct netmsg_rts *rmsg = (void *)msg;
1643 struct rt_addrinfo rtinfo;
1644 struct radix_node_head *rnh;
1645 struct rtentry *rt;
1646 int nextcpu, error;
1649  * Copy the rtinfo. We need to make sure that the original
1650  * rtinfo in the netmsg, which is set up by the caller, will
1651  * _not_ be changed; else the next CPU on the netmsg forwarding
1652  * path will see a different rtinfo than what this CPU has seen.
1654 rtinfo = *rmsg->rtinfo;
1657 * Find the correct routing tree to use for this Address Family
1659 if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1660 if (mycpuid != 0)
1661 panic("partially initialized routing tables");
1662 lwkt_replymsg(&rmsg->base.lmsg, EAFNOSUPPORT);
1663 return;
1667 * Correct rtinfo for the host route searching.
1669 if (rtinfo.rti_flags & RTF_HOST) {
1670 rtinfo.rti_netmask = NULL;
1671 rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1674 rt = (struct rtentry *)
1675 rnh->rnh_lookup((char *)rtinfo.rti_dst,
1676 (char *)rtinfo.rti_netmask, rnh);
1679  * If we are asked for an "exact match", we need to make sure
1680  * that a host route search got a host route and a network
1681  * route search got a network route.
1683 if (rt != NULL && rmsg->exact_match &&
1684 ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1685 rt = NULL;
1687 if (rt == NULL) {
1689 * No matching routes have been found, don't count this
1690 * as a critical error (here, we set 'error' to 0), just
1691 * keep moving on, since at least prcloned routes are not
1692 * duplicated onto each CPU.
1694 error = 0;
1695 } else {
1696 rmsg->found_cnt++;
1698 rt->rt_refcnt++;
1699 error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
1700 rmsg->found_cnt);
1701 rt->rt_refcnt--;
1703 if (error == EJUSTRETURN) {
1704 lwkt_replymsg(&rmsg->base.lmsg, 0);
1705 return;
1709 nextcpu = mycpuid + 1;
1710 if (error) {
1711 KKASSERT(rmsg->found_cnt > 0);
1714  * An unrecoverable error has not occurred in the
1715  * following cases:
1716  * o The request is RTM_GET
1717  * o This is the first time the route has been found,
1718  * but the modification failed.
1720 if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
1721 panic("rtsearch_msghandler: unrecoverable error "
1722 "cpu %d", mycpuid);
1724 lwkt_replymsg(&rmsg->base.lmsg, error);
1725 } else if (nextcpu < ncpus) {
1726 lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
1727 } else {
1728 if (rmsg->found_cnt == 0) {
1729 /* The requested route was never seen ... */
1730 error = ESRCH;
1732 lwkt_replymsg(&rmsg->base.lmsg, error);
1737 rtmask_add_global(struct sockaddr *mask, boolean_t req_prio)
1739 struct netmsg_base msg;
1740 int flags = 0;
1742 if (req_prio)
1743 flags = MSGF_PRIORITY;
1744 netmsg_init(&msg, NULL, &curthread->td_msgport, flags,
1745 rtmask_add_msghandler);
1746 msg.lmsg.u.ms_resultp = mask;
1748 return rt_domsg_global(&msg);
1751 struct sockaddr *
1752 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1754 struct radix_node *n;
1756 #define clen(s) (*(u_char *)(s))
1757 n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
1758 if (n != NULL &&
1759 mask->sa_len >= clen(n->rn_key) &&
1760 bcmp((char *)mask + 1,
1761 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1762 return (struct sockaddr *)n->rn_key;
1763 } else {
1764 return NULL;
1766 #undef clen
1769 static void
1770 rtmask_add_msghandler(netmsg_t msg)
1772 struct lwkt_msg *lmsg = &msg->lmsg;
1773 struct sockaddr *mask = lmsg->u.ms_resultp;
1774 int error = 0, nextcpu;
1776 if (rtmask_lookup(mask) == NULL)
1777 error = ENOBUFS;
1779 nextcpu = mycpuid + 1;
1780 if (!error && nextcpu < ncpus)
1781 lwkt_forwardmsg(netisr_cpuport(nextcpu), lmsg);
1782 else
1783 lwkt_replymsg(lmsg, error);
1786 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1787 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1789 struct rtchange_arg {
1790 struct ifaddr *old_ifa;
1791 struct ifaddr *new_ifa;
1792 struct rtentry *rt;
1793 int changed;
1796 static void
1797 rtchange_ifa(struct rtentry *rt, struct rtchange_arg *ap)
1799 if (rt->rt_ifa->ifa_rtrequest != NULL)
1800 rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt);
1801 IFAFREE(rt->rt_ifa);
1803 IFAREF(ap->new_ifa);
1804 rt->rt_ifa = ap->new_ifa;
1805 rt->rt_ifp = ap->new_ifa->ifa_ifp;
1806 if (rt->rt_ifa->ifa_rtrequest != NULL)
1807 rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);
1809 ap->changed = 1;
1812 static int
1813 rtchange_callback(struct radix_node *rn, void *xap)
1815 struct rtchange_arg *ap = xap;
1816 struct rtentry *rt = (struct rtentry *)rn;
1818 if (rt->rt_ifa == ap->old_ifa) {
1819 if (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
1821  * We could saw off the branch we are still
1822  * sitting on if the ifa_rtrequest DEL/ADD
1823  * callbacks were invoked directly from here.
1825 ap->rt = rt;
1826 return EJUSTRETURN;
1828 rtchange_ifa(rt, ap);
1830 return 0;
1833 struct netmsg_rtchange {
1834 struct netmsg_base base;
1835 struct ifaddr *old_ifa;
1836 struct ifaddr *new_ifa;
1837 int changed;
1840 static void
1841 rtchange_dispatch(netmsg_t msg)
1843 struct netmsg_rtchange *rmsg = (void *)msg;
1844 struct radix_node_head *rnh;
1845 struct rtchange_arg arg;
1846 int nextcpu, cpu;
1848 cpu = mycpuid;
1850 memset(&arg, 0, sizeof(arg));
1851 arg.old_ifa = rmsg->old_ifa;
1852 arg.new_ifa = rmsg->new_ifa;
1854 rnh = rt_tables[cpu][AF_INET];
1855 for (;;) {
1856 int error;
1858 KKASSERT(arg.rt == NULL);
1859 error = rnh->rnh_walktree(rnh, rtchange_callback, &arg);
1860 if (arg.rt != NULL) {
1861 struct rtentry *rt;
1863 rt = arg.rt;
1864 arg.rt = NULL;
1865 rtchange_ifa(rt, &arg);
1866 } else {
1867 break;
1870 if (arg.changed)
1871 rmsg->changed = 1;
1873 nextcpu = cpu + 1;
1874 if (nextcpu < ncpus)
1875 lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
1876 else
1877 lwkt_replymsg(&rmsg->base.lmsg, 0);
1881 rtchange(struct ifaddr *old_ifa, struct ifaddr *new_ifa)
1883 struct netmsg_rtchange msg;
1886  * XXX individual requests are not independently chained,
1887 * which means that the per-cpu route tables will not be
1888 * consistent in the middle of the operation. If routes
1889 * related to the interface are manipulated while we are
1890  * doing this the inconsistency could trigger a panic.
1892 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
1893 rtchange_dispatch);
1894 msg.old_ifa = old_ifa;
1895 msg.new_ifa = new_ifa;
1896 msg.changed = 0;
1897 rt_domsg_global(&msg.base);
1899 if (msg.changed) {
1900 old_ifa->ifa_flags &= ~IFA_ROUTE;
1901 new_ifa->ifa_flags |= IFA_ROUTE;
1902 return 0;
1903 } else {
1904 return ENOENT;
1909 rt_domsg_global(struct netmsg_base *nmsg)
1911 ASSERT_CANDOMSG_NETISR0(curthread);
1912 return lwkt_domsg(netisr_cpuport(0), &nmsg->lmsg, 0);