sys/net/route.c
1 /*
2 * Copyright (c) 2004, 2005 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Jeffrey M. Hsu.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of The DragonFly Project nor the names of its
16 * contributors may be used to endorse or promote products derived
17 * from this software without specific, prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
34 * Copyright (c) 1980, 1986, 1991, 1993
35 * The Regents of the University of California. All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
61 * @(#)route.c 8.3 (Berkeley) 1/9/95
62 * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
65 #include "opt_inet.h"
66 #include "opt_mpls.h"
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/malloc.h>
71 #include <sys/mbuf.h>
72 #include <sys/socket.h>
73 #include <sys/domain.h>
74 #include <sys/kernel.h>
75 #include <sys/sysctl.h>
76 #include <sys/globaldata.h>
77 #include <sys/thread.h>
79 #include <net/if.h>
80 #include <net/route.h>
81 #include <net/netisr.h>
83 #include <netinet/in.h>
84 #include <net/ip_mroute/ip_mroute.h>
86 #include <sys/thread2.h>
87 #include <sys/msgport2.h>
88 #include <net/netmsg2.h>
89 #include <net/netisr2.h>
91 #ifdef MPLS
92 #include <netproto/mpls/mpls.h>
93 #endif
95 static struct rtstatistics rtstatistics_percpu[MAXCPU];
96 #define rtstat rtstatistics_percpu[mycpuid]
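/*
 * Per-cpu routing tables, indexed by cpu id and address family.  Every
 * cpu keeps its own replica of the tree; the replicas are meant to stay
 * identical and are updated through the netisr message handlers below.
 */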
98 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
100 static void rt_maskedcopy (struct sockaddr *, struct sockaddr *,
101 struct sockaddr *);
102 static void rtable_init(void);
103 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
104 struct rtentry *, void *);
106 static void rtredirect_msghandler(netmsg_t msg);
107 static void rtrequest1_msghandler(netmsg_t msg);
108 static void rtsearch_msghandler(netmsg_t msg);
109 static void rtmask_add_msghandler(netmsg_t msg);
111 static int rt_setshims(struct rtentry *, struct sockaddr **);
113 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
115 #ifdef ROUTE_DEBUG
116 static int route_debug = 1;
117 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
118 &route_debug, 0, "");
119 #endif
121 int route_assert_owner_access = 1;
122 SYSCTL_INT(_net_route, OID_AUTO, assert_owner_access, CTLFLAG_RW,
123 &route_assert_owner_access, 0, "");
125 u_long route_kmalloc_limit = 0;
126 TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
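/*
 * net.route.kmalloc_limit is a boot-time tunable; when set to a non-zero
 * value, route_init() raises the M_RTABLE kmalloc limit accordingly.
 */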
129 * Initialize the route table(s) for protocol domains and
130 * create a helper thread which will be responsible for updating
131 * route table entries on each cpu.
133 void
134 route_init(void)
136 int cpu;
138 for (cpu = 0; cpu < ncpus; ++cpu)
139 bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
140 rn_init(); /* initialize all zeroes, all ones, mask table */
141 rtable_init(); /* call dom_rtattach() on each cpu */
143 if (route_kmalloc_limit)
144 kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);
147 static void
148 rtable_init_oncpu(netmsg_t msg)
150 struct domain *dom;
151 int cpu = mycpuid;
153 SLIST_FOREACH(dom, &domains, dom_next) {
154 if (dom->dom_rtattach) {
155 dom->dom_rtattach(
156 (void **)&rt_tables[cpu][dom->dom_family],
157 dom->dom_rtoffset);
160 ifnet_forwardmsg(&msg->lmsg, cpu + 1);
163 static void
164 rtable_init(void)
166 struct netmsg_base msg;
168 netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
169 ifnet_domsg(&msg.lmsg, 0);
173 * Routing statistics.
175 static int
176 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
178 int cpu, error = 0;
180 for (cpu = 0; cpu < ncpus; ++cpu) {
181 if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
182 sizeof(struct rtstatistics))))
183 break;
184 if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
185 sizeof(struct rtstatistics))))
186 break;
189 return (error);
191 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
192 0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
195 * Packet routing routines.
199 * Look up and fill in the "ro_rt" rtentry field in a route structure given
200 * an address in the "ro_dst" field. Always send a report on a miss and
201 * always clone routes.
203 void
204 rtalloc(struct route *ro)
206 rtalloc_ign(ro, 0UL);
210 * Look up and fill in the "ro_rt" rtentry field in a route structure given
211 * an address in the "ro_dst" field. Always send a report on a miss and
212 * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
213 * ignored.
215 void
216 rtalloc_ign(struct route *ro, u_long ignoreflags)
218 if (ro->ro_rt != NULL) {
219 if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
220 return;
221 rtfree(ro->ro_rt);
222 ro->ro_rt = NULL;
224 ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
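/*
 * Illustrative sketch, not part of the original file: a typical caller
 * embeds a struct route, fills in ro_dst and lets rtalloc_ign() resolve
 * ro_rt.  'dst_sin' below is a hypothetical caller-supplied sockaddr_in.
 *
 *	struct route ro;
 *
 *	bzero(&ro, sizeof(ro));
 *	bcopy(dst_sin, &ro.ro_dst, dst_sin->sin_len);
 *	rtalloc_ign(&ro, RTF_PRCLONING);
 *	if (ro.ro_rt != NULL) {
 *		... use ro.ro_rt ...
 *		rtfree(ro.ro_rt);
 *	}
 */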
228 * Look up the route that matches the given "dst" address.
230 * Route lookup can have the side-effect of creating and returning
231 * a cloned route instead when "dst" matches a cloning route and the
232 * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
234 * Any route returned has its reference count incremented.
236 struct rtentry *
237 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
239 struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
240 struct rtentry *rt;
242 if (rnh == NULL)
243 goto unreach;
246 * Look up route in the radix tree.
248 rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
249 if (rt == NULL)
250 goto unreach;
253 * Handle cloning routes.
255 if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
256 struct rtentry *clonedroute;
257 int error;
259 clonedroute = rt; /* copy in/copy out parameter */
260 error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
261 &clonedroute); /* clone the route */
262 if (error != 0) { /* cloning failed */
263 if (generate_report)
264 rt_dstmsg(RTM_MISS, dst, error);
265 rt->rt_refcnt++;
266 return (rt); /* return the uncloned route */
268 if (generate_report) {
269 if (clonedroute->rt_flags & RTF_XRESOLVE)
270 rt_dstmsg(RTM_RESOLVE, dst, 0);
271 else
272 rt_rtmsg(RTM_ADD, clonedroute,
273 clonedroute->rt_ifp, 0);
275 return (clonedroute); /* return cloned route */
279 * Increment the reference count of the matched route and return.
281 rt->rt_refcnt++;
282 return (rt);
284 unreach:
285 rtstat.rts_unreach++;
286 if (generate_report)
287 rt_dstmsg(RTM_MISS, dst, 0);
288 return (NULL);
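/*
 * Release a reference on a route.  Routes are owned by the cpu whose
 * table they were inserted into (rt_cpuid); when the caller runs on the
 * owning cpu the free is performed directly, otherwise it is dispatched
 * to the owning cpu via rtfree_remote().
 */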
291 void
292 rtfree(struct rtentry *rt)
294 if (rt->rt_cpuid == mycpuid)
295 rtfree_oncpu(rt);
296 else
297 rtfree_remote(rt);
300 void
301 rtfree_oncpu(struct rtentry *rt)
303 KKASSERT(rt->rt_cpuid == mycpuid);
304 KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
306 --rt->rt_refcnt;
307 if (rt->rt_refcnt == 0) {
308 struct radix_node_head *rnh =
309 rt_tables[mycpuid][rt_key(rt)->sa_family];
311 if (rnh->rnh_close)
312 rnh->rnh_close((struct radix_node *)rt, rnh);
313 if (!(rt->rt_flags & RTF_UP)) {
314 /* deallocate route */
315 if (rt->rt_ifa != NULL)
316 IFAFREE(rt->rt_ifa);
317 if (rt->rt_parent != NULL)
318 RTFREE(rt->rt_parent); /* recursive call! */
319 Free(rt_key(rt));
320 Free(rt);
325 static void
326 rtfree_remote_dispatch(netmsg_t msg)
328 struct lwkt_msg *lmsg = &msg->lmsg;
329 struct rtentry *rt = lmsg->u.ms_resultp;
331 rtfree_oncpu(rt);
332 lwkt_replymsg(lmsg, 0);
335 void
336 rtfree_remote(struct rtentry *rt)
338 struct netmsg_base *msg;
339 struct lwkt_msg *lmsg;
341 KKASSERT(rt->rt_cpuid != mycpuid);
343 if (route_assert_owner_access) {
344 panic("rt remote free rt_cpuid %d, mycpuid %d",
345 rt->rt_cpuid, mycpuid);
346 } else {
347 kprintf("rt remote free rt_cpuid %d, mycpuid %d\n",
348 rt->rt_cpuid, mycpuid);
349 print_backtrace(-1);
352 msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
353 netmsg_init(msg, NULL, &netisr_afree_rport, 0, rtfree_remote_dispatch);
354 lmsg = &msg->lmsg;
355 lmsg->u.ms_resultp = rt;
357 lwkt_sendmsg(netisr_cpuport(rt->rt_cpuid), lmsg);
361 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
362 struct sockaddr *netmask, int flags, struct sockaddr *src)
364 struct rtentry *rt = NULL;
365 struct rt_addrinfo rtinfo;
366 struct ifaddr *ifa;
367 u_long *stat = NULL;
368 int error;
370 /* verify the gateway is directly reachable */
371 if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
372 error = ENETUNREACH;
373 goto out;
377 * If the redirect isn't from our current router for this destination,
378 * it's either old or wrong.
380 if (!(flags & RTF_DONE) && /* XXX JH */
381 (rt = rtpurelookup(dst)) != NULL &&
382 (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
383 error = EINVAL;
384 goto done;
388 * If it redirects us to ourselves, we have a routing loop,
389 * perhaps as a result of an interface going down recently.
391 if (ifa_ifwithaddr(gateway)) {
392 error = EHOSTUNREACH;
393 goto done;
397 * Create a new entry if the lookup failed or if we got back
398 * a wildcard entry for the default route. This is necessary
399 * for hosts which use routing redirects generated by smart
400 * gateways to dynamically build the routing tables.
402 if (rt == NULL)
403 goto create;
404 if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
405 rtfree(rt);
406 goto create;
409 /* Ignore redirects for directly connected hosts. */
410 if (!(rt->rt_flags & RTF_GATEWAY)) {
411 error = EHOSTUNREACH;
412 goto done;
415 if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
417 * Changing from a network route to a host route.
418 * Create a new host route rather than smashing the
419 * network route.
421 create:
422 flags |= RTF_GATEWAY | RTF_DYNAMIC;
423 bzero(&rtinfo, sizeof(struct rt_addrinfo));
424 rtinfo.rti_info[RTAX_DST] = dst;
425 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
426 rtinfo.rti_info[RTAX_NETMASK] = netmask;
427 rtinfo.rti_flags = flags;
428 rtinfo.rti_ifa = ifa;
429 rt = NULL; /* copy-in/copy-out parameter */
430 error = rtrequest1(RTM_ADD, &rtinfo, &rt);
431 if (rt != NULL)
432 flags = rt->rt_flags;
433 stat = &rtstat.rts_dynamic;
434 } else {
436 * Smash the current notion of the gateway to this destination.
437 * Should check about netmask!!!
439 rt->rt_flags |= RTF_MODIFIED;
440 flags |= RTF_MODIFIED;
442 /* We only need to report rtmsg on CPU0 */
443 rt_setgate(rt, rt_key(rt), gateway,
444 mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
445 error = 0;
446 stat = &rtstat.rts_newgateway;
449 done:
450 if (rt != NULL)
451 rtfree(rt);
452 out:
453 if (error != 0)
454 rtstat.rts_badredirect++;
455 else if (stat != NULL)
456 (*stat)++;
458 return error;
461 struct netmsg_rtredirect {
462 struct netmsg_base base;
463 struct sockaddr *dst;
464 struct sockaddr *gateway;
465 struct sockaddr *netmask;
466 int flags;
467 struct sockaddr *src;
471 * Force a routing table entry to the specified
472 * destination to go through the given gateway.
473 * Normally called as a result of a routing redirect
474 * message from the network layer.
476 * N.B.: must be called at splnet
478 void
479 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
480 struct sockaddr *netmask, int flags, struct sockaddr *src)
482 struct rt_addrinfo rtinfo;
483 int error;
484 struct netmsg_rtredirect msg;
486 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
487 0, rtredirect_msghandler);
488 msg.dst = dst;
489 msg.gateway = gateway;
490 msg.netmask = netmask;
491 msg.flags = flags;
492 msg.src = src;
493 error = rt_domsg_global(&msg.base);
494 bzero(&rtinfo, sizeof(struct rt_addrinfo));
495 rtinfo.rti_info[RTAX_DST] = dst;
496 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
497 rtinfo.rti_info[RTAX_NETMASK] = netmask;
498 rtinfo.rti_info[RTAX_AUTHOR] = src;
499 rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
502 static void
503 rtredirect_msghandler(netmsg_t msg)
505 struct netmsg_rtredirect *rmsg = (void *)msg;
506 int nextcpu;
508 rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
509 rmsg->flags, rmsg->src);
510 nextcpu = mycpuid + 1;
511 if (nextcpu < ncpus)
512 lwkt_forwardmsg(netisr_cpuport(nextcpu), &msg->lmsg);
513 else
514 lwkt_replymsg(&msg->lmsg, 0);
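/*
 * Note on the dispatch pattern used by the message handlers in this file:
 * a request is sent to cpu0's netisr port, each cpu applies it to its own
 * table and forwards the message to the next cpu, and the last cpu replies
 * to the originator.  This is how the per-cpu replicas are kept in step;
 * see rt_domsg_global() at the end of this file.
 */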
518 * Routing table ioctl interface.
521 rtioctl(u_long req, caddr_t data, struct ucred *cred)
523 #ifdef INET
524 /* Multicast goop, grrr... */
525 return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
526 #else
527 return ENXIO;
528 #endif
531 struct ifaddr *
532 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
534 struct ifaddr *ifa;
536 if (!(flags & RTF_GATEWAY)) {
538 * If we are adding a route to an interface,
539 * and the interface is a point-to-point link,
540 * we should search for the destination
541 * as our clue to the interface. Otherwise
542 * we can use the local address.
544 ifa = NULL;
545 if (flags & RTF_HOST) {
546 ifa = ifa_ifwithdstaddr(dst);
548 if (ifa == NULL)
549 ifa = ifa_ifwithaddr(gateway);
550 } else {
552 * If we are adding a route to a remote net
553 * or host, the gateway may still be on the
554 * other end of a pt to pt link.
556 ifa = ifa_ifwithdstaddr(gateway);
558 if (ifa == NULL)
559 ifa = ifa_ifwithnet(gateway);
560 if (ifa == NULL) {
561 struct rtentry *rt;
563 rt = rtpurelookup(gateway);
564 if (rt == NULL)
565 return (NULL);
566 rt->rt_refcnt--;
567 if ((ifa = rt->rt_ifa) == NULL)
568 return (NULL);
570 if (ifa->ifa_addr->sa_family != dst->sa_family) {
571 struct ifaddr *oldifa = ifa;
573 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
574 if (ifa == NULL)
575 ifa = oldifa;
577 return (ifa);
580 static int rt_fixdelete (struct radix_node *, void *);
581 static int rt_fixchange (struct radix_node *, void *);
583 struct rtfc_arg {
584 struct rtentry *rt0;
585 struct radix_node_head *rnh;
589 * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
592 rt_getifa(struct rt_addrinfo *rtinfo)
594 struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
595 struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
596 struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
597 int flags = rtinfo->rti_flags;
600 * ifp may be specified by sockaddr_dl
601 * when protocol address is ambiguous.
603 if (rtinfo->rti_ifp == NULL) {
604 struct sockaddr *ifpaddr;
606 ifpaddr = rtinfo->rti_info[RTAX_IFP];
607 if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
608 struct ifaddr *ifa;
610 ifa = ifa_ifwithnet(ifpaddr);
611 if (ifa != NULL)
612 rtinfo->rti_ifp = ifa->ifa_ifp;
616 if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
617 rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
618 if (rtinfo->rti_ifa == NULL) {
619 struct sockaddr *sa;
621 sa = ifaaddr != NULL ? ifaaddr :
622 (gateway != NULL ? gateway : dst);
623 if (sa != NULL && rtinfo->rti_ifp != NULL)
624 rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
625 else if (dst != NULL && gateway != NULL)
626 rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
627 else if (sa != NULL)
628 rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
630 if (rtinfo->rti_ifa == NULL)
631 return (ENETUNREACH);
633 if (rtinfo->rti_ifp == NULL)
634 rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
635 return (0);
639 * Do appropriate manipulations of a routing tree given
640 * all the bits of info needed
643 rtrequest(
644 int req,
645 struct sockaddr *dst,
646 struct sockaddr *gateway,
647 struct sockaddr *netmask,
648 int flags,
649 struct rtentry **ret_nrt)
651 struct rt_addrinfo rtinfo;
653 bzero(&rtinfo, sizeof(struct rt_addrinfo));
654 rtinfo.rti_info[RTAX_DST] = dst;
655 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
656 rtinfo.rti_info[RTAX_NETMASK] = netmask;
657 rtinfo.rti_flags = flags;
658 return rtrequest1(req, &rtinfo, ret_nrt);
662 rtrequest_global(
663 int req,
664 struct sockaddr *dst,
665 struct sockaddr *gateway,
666 struct sockaddr *netmask,
667 int flags)
669 struct rt_addrinfo rtinfo;
671 bzero(&rtinfo, sizeof(struct rt_addrinfo));
672 rtinfo.rti_info[RTAX_DST] = dst;
673 rtinfo.rti_info[RTAX_GATEWAY] = gateway;
674 rtinfo.rti_info[RTAX_NETMASK] = netmask;
675 rtinfo.rti_flags = flags;
676 return rtrequest1_global(req, &rtinfo, NULL, NULL, RTREQ_PRIO_NORM);
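/*
 * Illustrative sketch, not part of the original file: installing a static
 * host route on every cpu could look roughly like this, with 'dst_sa' and
 * 'gw_sa' standing in for caller-supplied sockaddrs:
 *
 *	error = rtrequest_global(RTM_ADD, dst_sa, gw_sa, NULL,
 *				 RTF_HOST | RTF_GATEWAY | RTF_STATIC);
 *
 * rtrequest() performs the same operation on the current cpu's table only
 * and can hand the resulting rtentry back through ret_nrt.
 */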
679 struct netmsg_rtq {
680 struct netmsg_base base;
681 int req;
682 struct rt_addrinfo *rtinfo;
683 rtrequest1_callback_func_t callback;
684 void *arg;
688 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
689 rtrequest1_callback_func_t callback, void *arg, boolean_t req_prio)
691 int error, flags = 0;
692 struct netmsg_rtq msg;
694 if (req_prio)
695 flags = MSGF_PRIORITY;
696 netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
697 rtrequest1_msghandler);
698 msg.base.lmsg.ms_error = -1;
699 msg.req = req;
700 msg.rtinfo = rtinfo;
701 msg.callback = callback;
702 msg.arg = arg;
703 error = rt_domsg_global(&msg.base);
704 return (error);
708 * Handle a route table request on the current cpu. Since the route tables
709 * are supposed to be identical on each cpu, an error occurring later in the
710 * message chain is considered system-fatal.
712 static void
713 rtrequest1_msghandler(netmsg_t msg)
715 struct netmsg_rtq *rmsg = (void *)msg;
716 struct rt_addrinfo rtinfo;
717 struct rtentry *rt = NULL;
718 int nextcpu;
719 int error;
722 * Copy the rtinfo. We need to make sure that the original
723 * rtinfo, which is set up by the caller in the netmsg, will
724 * _not_ be changed; else the next CPU on the netmsg forwarding
725 * path will see a different rtinfo than what this CPU has seen.
727 rtinfo = *rmsg->rtinfo;
729 error = rtrequest1(rmsg->req, &rtinfo, &rt);
730 if (rt)
731 --rt->rt_refcnt;
732 if (rmsg->callback)
733 rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);
736 * RTM_DELETEs are propagated even if an error occurs, since a
737 * cloned route might be undergoing deletion and cloned routes
738 * are not necessarily replicated. An overall error is returned
739 * only if no cpus have the route in question.
741 if (rmsg->base.lmsg.ms_error < 0 || error == 0)
742 rmsg->base.lmsg.ms_error = error;
744 nextcpu = mycpuid + 1;
745 if (error && rmsg->req != RTM_DELETE) {
746 if (mycpuid != 0) {
747 panic("rtrequest1_msghandler: rtrequest table "
748 "error was cpu%d, err %d\n", mycpuid, error);
750 lwkt_replymsg(&rmsg->base.lmsg, error);
751 } else if (nextcpu < ncpus) {
752 lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
753 } else {
754 lwkt_replymsg(&rmsg->base.lmsg, rmsg->base.lmsg.ms_error);
759 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
761 struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
762 struct rtentry *rt;
763 struct radix_node *rn;
764 struct radix_node_head *rnh;
765 struct ifaddr *ifa;
766 struct sockaddr *ndst;
767 boolean_t reportmsg;
768 int error = 0;
770 #define gotoerr(x) { error = x ; goto bad; }
772 #ifdef ROUTE_DEBUG
773 if (route_debug)
774 rt_addrinfo_print(req, rtinfo);
775 #endif
777 crit_enter();
779 * Find the correct routing tree to use for this Address Family
781 if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
782 gotoerr(EAFNOSUPPORT);
785 * If we are adding a host route then we don't want to put
786 * a netmask in the tree, nor do we want to clone it.
788 if (rtinfo->rti_flags & RTF_HOST) {
789 rtinfo->rti_info[RTAX_NETMASK] = NULL;
790 rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
793 switch (req) {
794 case RTM_DELETE:
795 /* Remove the item from the tree. */
796 rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
797 (char *)rtinfo->rti_info[RTAX_NETMASK],
798 rnh);
799 if (rn == NULL)
800 gotoerr(ESRCH);
801 KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
802 ("rnh_deladdr returned flags 0x%x", rn->rn_flags));
803 rt = (struct rtentry *)rn;
805 /* ref to prevent a deletion race */
806 ++rt->rt_refcnt;
808 /* Free any routes cloned from this one. */
809 if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
810 rt_mask(rt) != NULL) {
811 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
812 (char *)rt_mask(rt),
813 rt_fixdelete, rt);
816 if (rt->rt_gwroute != NULL) {
817 RTFREE(rt->rt_gwroute);
818 rt->rt_gwroute = NULL;
822 * NB: RTF_UP must be set during the search above,
823 * because we might delete the last ref, causing
824 * rt to get freed prematurely.
826 rt->rt_flags &= ~RTF_UP;
828 #ifdef ROUTE_DEBUG
829 if (route_debug)
830 rt_print(rtinfo, rt);
831 #endif
833 /* Give the protocol a chance to keep things in sync. */
834 if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
835 ifa->ifa_rtrequest(RTM_DELETE, rt);
838 * If the caller wants it, then it can have it,
839 * but it's up to it to free the rtentry as we won't be
840 * doing it.
842 KASSERT(rt->rt_refcnt >= 0,
843 ("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
844 if (ret_nrt != NULL) {
845 /* leave ref intact for return */
846 *ret_nrt = rt;
847 } else {
848 /* deref / attempt to destroy */
849 rtfree(rt);
851 break;
853 case RTM_RESOLVE:
854 if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
855 gotoerr(EINVAL);
857 KASSERT(rt->rt_cpuid == mycpuid,
858 ("rt resolve rt_cpuid %d, mycpuid %d",
859 rt->rt_cpuid, mycpuid));
861 ifa = rt->rt_ifa;
862 rtinfo->rti_flags =
863 rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
864 rtinfo->rti_flags |= RTF_WASCLONED;
865 rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
866 if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
867 rtinfo->rti_flags |= RTF_HOST;
868 rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
869 rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
870 rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
871 goto makeroute;
873 case RTM_ADD:
874 KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
875 rtinfo->rti_info[RTAX_GATEWAY] != NULL,
876 ("rtrequest: GATEWAY but no gateway"));
878 if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
879 gotoerr(error);
880 ifa = rtinfo->rti_ifa;
881 makeroute:
882 R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
883 if (rt == NULL) {
884 if (req == RTM_ADD) {
885 kprintf("rtrequest1: alloc rtentry failed on "
886 "cpu%d\n", mycpuid);
888 gotoerr(ENOBUFS);
890 bzero(rt, sizeof(struct rtentry));
891 rt->rt_flags = RTF_UP | rtinfo->rti_flags;
892 rt->rt_cpuid = mycpuid;
894 if (mycpuid != 0 && req == RTM_ADD) {
895 /* For RTM_ADD, we have already sent rtmsg on CPU0. */
896 reportmsg = RTL_DONTREPORT;
897 } else {
899 * For RTM_ADD, we only send rtmsg on CPU0.
900 * For RTM_RESOLVE, we always send rtmsg. XXX
902 reportmsg = RTL_REPORTMSG;
904 error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
905 reportmsg);
906 if (error != 0) {
907 Free(rt);
908 gotoerr(error);
911 ndst = rt_key(rt);
912 if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
913 rt_maskedcopy(dst, ndst,
914 rtinfo->rti_info[RTAX_NETMASK]);
915 else
916 bcopy(dst, ndst, dst->sa_len);
918 if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
919 rt_setshims(rt, rtinfo->rti_info);
922 * Note that we now have a reference to the ifa.
923 * This moved from below so that rnh->rnh_addaddr() can
924 * examine the ifa and ifa->ifa_ifp if it so desires.
926 IFAREF(ifa);
927 rt->rt_ifa = ifa;
928 rt->rt_ifp = ifa->ifa_ifp;
929 /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
931 rn = rnh->rnh_addaddr((char *)ndst,
932 (char *)rtinfo->rti_info[RTAX_NETMASK],
933 rnh, rt->rt_nodes);
934 if (rn == NULL) {
935 struct rtentry *oldrt;
938 * We already have one of these in the tree.
939 * We do a special hack: if the old route was
940 * cloned, then we blow it away and try
941 * re-inserting the new one.
943 oldrt = rtpurelookup(ndst);
944 if (oldrt != NULL) {
945 --oldrt->rt_refcnt;
946 if (oldrt->rt_flags & RTF_WASCLONED) {
947 rtrequest(RTM_DELETE, rt_key(oldrt),
948 oldrt->rt_gateway,
949 rt_mask(oldrt),
950 oldrt->rt_flags, NULL);
951 rn = rnh->rnh_addaddr((char *)ndst,
952 (char *)
953 rtinfo->rti_info[RTAX_NETMASK],
954 rnh, rt->rt_nodes);
960 * If it still failed to go into the tree,
961 * then un-make it (this should be a function).
963 if (rn == NULL) {
964 if (rt->rt_gwroute != NULL)
965 rtfree(rt->rt_gwroute);
966 IFAFREE(ifa);
967 Free(rt_key(rt));
968 Free(rt);
969 gotoerr(EEXIST);
973 * If we got here from RESOLVE, then we are cloning
974 * so clone the rest, and note that we
975 * are a clone (and increment the parent's references)
977 if (req == RTM_RESOLVE) {
978 rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
979 rt->rt_rmx.rmx_pksent = 0; /* reset packet counter */
980 if ((*ret_nrt)->rt_flags &
981 (RTF_CLONING | RTF_PRCLONING)) {
982 rt->rt_parent = *ret_nrt;
983 (*ret_nrt)->rt_refcnt++;
988 * if this protocol has something to add to this then
989 * allow it to do that as well.
991 if (ifa->ifa_rtrequest != NULL)
992 ifa->ifa_rtrequest(req, rt);
995 * We repeat the same procedure from rt_setgate() here because
996 * it doesn't fire when we call it there because the node
997 * hasn't been added to the tree yet.
999 if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
1000 rt_mask(rt) != NULL) {
1001 struct rtfc_arg arg = { rt, rnh };
1003 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1004 (char *)rt_mask(rt),
1005 rt_fixchange, &arg);
1008 #ifdef ROUTE_DEBUG
1009 if (route_debug)
1010 rt_print(rtinfo, rt);
1011 #endif
1013 * Return the resulting rtentry,
1014 * increasing the number of references by one.
1016 if (ret_nrt != NULL) {
1017 rt->rt_refcnt++;
1018 *ret_nrt = rt;
1020 break;
1021 default:
1022 error = EOPNOTSUPP;
1024 bad:
1025 #ifdef ROUTE_DEBUG
1026 if (route_debug) {
1027 if (error)
1028 kprintf("rti %p failed error %d\n", rtinfo, error);
1029 else
1030 kprintf("rti %p succeeded\n", rtinfo);
1032 #endif
1033 crit_exit();
1034 return (error);
1038 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1039 * (i.e., the routes related to it by the operation of cloning). This
1040 * routine is iterated over all potential former-child-routes by way of
1041 * rnh->rnh_walktree_from() above, and those that actually are children of
1042 * the late parent (passed in as VP here) are themselves deleted.
1044 static int
1045 rt_fixdelete(struct radix_node *rn, void *vp)
1047 struct rtentry *rt = (struct rtentry *)rn;
1048 struct rtentry *rt0 = vp;
1050 if (rt->rt_parent == rt0 &&
1051 !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1052 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1053 rt->rt_flags, NULL);
1055 return 0;
1059 * This routine is called from rt_setgate() to do the analogous thing for
1060 * adds and changes. There is the added complication in this case of a
1061 * middle insert; i.e., insertion of a new network route between an older
1062 * network route and (cloned) host routes. For this reason, a simple check
1063 * of rt->rt_parent is insufficient; each candidate route must be tested
1064 * against the (mask, value) of the new route (passed as before in vp)
1065 * to see if the new route matches it.
1067 * XXX - it may be possible to do fixdelete() for changes and reserve this
1068 * routine just for adds. I'm not sure why I thought it was necessary to do
1069 * changes this way.
1071 #ifdef DEBUG
1072 static int rtfcdebug = 0;
1073 #endif
1075 static int
1076 rt_fixchange(struct radix_node *rn, void *vp)
1078 struct rtentry *rt = (struct rtentry *)rn;
1079 struct rtfc_arg *ap = vp;
1080 struct rtentry *rt0 = ap->rt0;
1081 struct radix_node_head *rnh = ap->rnh;
1082 u_char *xk1, *xm1, *xk2, *xmp;
1083 int i, len, mlen;
1085 #ifdef DEBUG
1086 if (rtfcdebug)
1087 kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1088 #endif
1090 if (rt->rt_parent == NULL ||
1091 (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1092 #ifdef DEBUG
1093 if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1094 #endif
1095 return 0;
1098 if (rt->rt_parent == rt0) {
1099 #ifdef DEBUG
1100 if (rtfcdebug) kprintf("parent match\n");
1101 #endif
1102 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1103 rt->rt_flags, NULL);
1107 * There probably is a function somewhere which does this...
1108 * if not, there should be.
1110 len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1112 xk1 = (u_char *)rt_key(rt0);
1113 xm1 = (u_char *)rt_mask(rt0);
1114 xk2 = (u_char *)rt_key(rt);
1116 /* avoid applying a less specific route */
1117 xmp = (u_char *)rt_mask(rt->rt_parent);
1118 mlen = rt_key(rt->rt_parent)->sa_len;
1119 if (mlen > rt_key(rt0)->sa_len) {
1120 #ifdef DEBUG
1121 if (rtfcdebug)
1122 kprintf("rt_fixchange: inserting a less "
1123 "specific route\n");
1124 #endif
1125 return 0;
1127 for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1128 if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1129 #ifdef DEBUG
1130 if (rtfcdebug)
1131 kprintf("rt_fixchange: inserting a less "
1132 "specific route\n");
1133 #endif
1134 return 0;
1138 for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1139 if ((xk2[i] & xm1[i]) != xk1[i]) {
1140 #ifdef DEBUG
1141 if (rtfcdebug) kprintf("no match\n");
1142 #endif
1143 return 0;
1148 * OK, this node is a clone, and matches the node currently being
1149 * changed/added under the node's mask. So, get rid of it.
1151 #ifdef DEBUG
1152 if (rtfcdebug) kprintf("deleting\n");
1153 #endif
1154 return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1155 rt->rt_flags, NULL);
1159 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1160 boolean_t generate_report)
1162 char *space, *oldspace;
1163 int dlen = RT_ROUNDUP(dst->sa_len), glen = RT_ROUNDUP(gate->sa_len);
1164 struct rtentry *rt = rt0;
1165 struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1168 * A host route with the destination equal to the gateway
1169 * will interfere with keeping LLINFO in the routing
1170 * table, so disallow it.
1172 if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1173 (RTF_HOST | RTF_GATEWAY)) &&
1174 dst->sa_len == gate->sa_len &&
1175 sa_equal(dst, gate)) {
1177 * The route might already exist if this is an RTM_CHANGE
1178 * or a routing redirect, so try to delete it.
1180 if (rt_key(rt0) != NULL)
1181 rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1182 rt_mask(rt0), rt0->rt_flags, NULL);
1183 return EADDRNOTAVAIL;
1187 * Both dst and gateway are stored in the same malloc'ed chunk
1188 * (If I ever get my hands on....)
1189 * if we need to malloc a new chunk, then keep the old one around
1190 * till we don't need it any more.
1192 if (rt->rt_gateway == NULL ||
1193 glen > RT_ROUNDUP(rt->rt_gateway->sa_len)) {
1194 oldspace = (char *)rt_key(rt);
1195 R_Malloc(space, char *, dlen + glen);
1196 if (space == NULL)
1197 return ENOBUFS;
1198 rt->rt_nodes->rn_key = space;
1199 } else {
1200 space = (char *)rt_key(rt); /* Just use the old space. */
1201 oldspace = NULL;
1204 /* Set the gateway value. */
1205 rt->rt_gateway = (struct sockaddr *)(space + dlen);
1206 bcopy(gate, rt->rt_gateway, glen);
1208 if (oldspace != NULL) {
1210 * If we allocated a new chunk, preserve the original dst.
1211 * This way, rt_setgate() really just sets the gate
1212 * and leaves the dst field alone.
1214 bcopy(dst, space, dlen);
1215 Free(oldspace);
1219 * If there is already a gwroute, it's now almost definitely wrong
1220 * so drop it.
1222 if (rt->rt_gwroute != NULL) {
1223 RTFREE(rt->rt_gwroute);
1224 rt->rt_gwroute = NULL;
1226 if (rt->rt_flags & RTF_GATEWAY) {
1228 * Cloning loop avoidance: In the presence of
1229 * protocol-cloning and bad configuration, it is
1230 * possible to get stuck in bottomless mutual recursion
1231 * (rtrequest rt_setgate rtlookup). We avoid this
1232 * by not allowing protocol-cloning to operate for
1233 * gateways (which is probably the correct choice
1234 * anyway), and avoid the resulting reference loops
1235 * by disallowing any route to run through itself as
1236 * a gateway. This is obviously mandatory when we
1237 * get rt->rt_output().
1239 * This breaks TTCP for hosts outside the gateway! XXX JH
1241 rt->rt_gwroute = _rtlookup(gate, generate_report,
1242 RTF_PRCLONING);
1243 if (rt->rt_gwroute == rt) {
1244 rt->rt_gwroute = NULL;
1245 --rt->rt_refcnt;
1246 return EDQUOT; /* failure */
1251 * This isn't going to do anything useful for host routes, so
1252 * don't bother. Also make sure we have a reasonable mask
1253 * (we don't yet have one during adds).
1255 if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1256 struct rtfc_arg arg = { rt, rnh };
1258 rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1259 (char *)rt_mask(rt),
1260 rt_fixchange, &arg);
1263 return 0;
1266 static void
1267 rt_maskedcopy(
1268 struct sockaddr *src,
1269 struct sockaddr *dst,
1270 struct sockaddr *netmask)
1272 u_char *cp1 = (u_char *)src;
1273 u_char *cp2 = (u_char *)dst;
1274 u_char *cp3 = (u_char *)netmask;
1275 u_char *cplim = cp2 + *cp3;
1276 u_char *cplim2 = cp2 + *cp1;
1278 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1279 cp3 += 2;
1280 if (cplim > cplim2)
1281 cplim = cplim2;
1282 while (cp2 < cplim)
1283 *cp2++ = *cp1++ & *cp3++;
1284 if (cp2 < cplim2)
1285 bzero(cp2, cplim2 - cp2);
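/*
 * Illustrative example, not part of the original file: rt_maskedcopy()
 * ANDs each data byte of src with the corresponding netmask byte and
 * zero-fills the remainder, so an AF_INET destination of 192.168.1.5
 * masked with 255.255.255.0 yields 192.168.1.0.
 */

/*
 * Resolve the rtentry actually usable for transmitting to 'dst': fall back
 * to a fresh lookup if rt0 is no longer RTF_UP, follow (and if necessary
 * re-resolve) rt_gwroute for RTF_GATEWAY routes, and fail for unexpired
 * RTF_REJECT routes.  On success *drt is set to the resulting route.
 */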
1289 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1291 struct rtentry *up_rt, *rt;
1293 if (!(rt0->rt_flags & RTF_UP)) {
1294 up_rt = rtlookup(dst);
1295 if (up_rt == NULL)
1296 return (EHOSTUNREACH);
1297 up_rt->rt_refcnt--;
1298 } else
1299 up_rt = rt0;
1300 if (up_rt->rt_flags & RTF_GATEWAY) {
1301 if (up_rt->rt_gwroute == NULL) {
1302 up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1303 if (up_rt->rt_gwroute == NULL)
1304 return (EHOSTUNREACH);
1305 } else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1306 rtfree(up_rt->rt_gwroute);
1307 up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1308 if (up_rt->rt_gwroute == NULL)
1309 return (EHOSTUNREACH);
1311 rt = up_rt->rt_gwroute;
1312 } else
1313 rt = up_rt;
1314 if (rt->rt_flags & RTF_REJECT &&
1315 (rt->rt_rmx.rmx_expire == 0 || /* rt doesn't expire */
1316 time_uptime < rt->rt_rmx.rmx_expire)) /* rt not expired */
1317 return (rt->rt_flags & RTF_HOST ? EHOSTDOWN : EHOSTUNREACH);
1318 *drt = rt;
1319 return 0;
1322 static int
1323 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim){
1324 int i;
1326 for (i=0; i<3; i++) {
1327 struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1328 int shimlen;
1330 if (shim == NULL)
1331 break;
1333 shimlen = RT_ROUNDUP(shim->sa_len);
1334 R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1335 bcopy(shim, rt->rt_shim[i], shimlen);
1338 return 0;
1341 #ifdef ROUTE_DEBUG
1344 * Print out a route table entry
1346 void
1347 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1349 kprintf("rti %p cpu %d route %p flags %08lx: ",
1350 rtinfo, mycpuid, rn, rn->rt_flags);
1351 sockaddr_print(rt_key(rn));
1352 kprintf(" mask ");
1353 sockaddr_print(rt_mask(rn));
1354 kprintf(" gw ");
1355 sockaddr_print(rn->rt_gateway);
1356 kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1357 kprintf(" ifa %p\n", rn->rt_ifa);
1360 void
1361 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1363 int didit = 0;
1364 int i;
1366 #ifdef ROUTE_DEBUG
1367 if (cmd == RTM_DELETE && route_debug > 1)
1368 print_backtrace(-1);
1369 #endif
1371 switch(cmd) {
1372 case RTM_ADD:
1373 kprintf("ADD ");
1374 break;
1375 case RTM_RESOLVE:
1376 kprintf("RES ");
1377 break;
1378 case RTM_DELETE:
1379 kprintf("DEL ");
1380 break;
1381 default:
1382 kprintf("C%02d ", cmd);
1383 break;
1385 kprintf("rti %p cpu %d ", rti, mycpuid);
1386 for (i = 0; i < rti->rti_addrs; ++i) {
1387 if (rti->rti_info[i] == NULL)
1388 continue;
1389 if (didit)
1390 kprintf(" ,");
1391 switch(i) {
1392 case RTAX_DST:
1393 kprintf("(DST ");
1394 break;
1395 case RTAX_GATEWAY:
1396 kprintf("(GWY ");
1397 break;
1398 case RTAX_NETMASK:
1399 kprintf("(MSK ");
1400 break;
1401 case RTAX_GENMASK:
1402 kprintf("(GEN ");
1403 break;
1404 case RTAX_IFP:
1405 kprintf("(IFP ");
1406 break;
1407 case RTAX_IFA:
1408 kprintf("(IFA ");
1409 break;
1410 case RTAX_AUTHOR:
1411 kprintf("(AUT ");
1412 break;
1413 case RTAX_BRD:
1414 kprintf("(BRD ");
1415 break;
1416 default:
1417 kprintf("(?%02d ", i);
1418 break;
1420 sockaddr_print(rti->rti_info[i]);
1421 kprintf(")");
1422 didit = 1;
1424 kprintf("\n");
1427 void
1428 sockaddr_print(struct sockaddr *sa)
1430 struct sockaddr_in *sa4;
1431 struct sockaddr_in6 *sa6;
1432 int len;
1433 int i;
1435 if (sa == NULL) {
1436 kprintf("NULL");
1437 return;
1440 len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1442 switch(sa->sa_family) {
1443 case AF_INET:
1444 case AF_INET6:
1445 default:
1446 switch(sa->sa_family) {
1447 case AF_INET:
1448 sa4 = (struct sockaddr_in *)sa;
1449 kprintf("INET %d %d.%d.%d.%d",
1450 ntohs(sa4->sin_port),
1451 (ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1452 (ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1453 (ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1454 (ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1456 break;
1457 case AF_INET6:
1458 sa6 = (struct sockaddr_in6 *)sa;
1459 kprintf("INET6 %d %04x:%04x%04x:%04x:%04x:%04x:%04x:%04x",
1460 ntohs(sa6->sin6_port),
1461 sa6->sin6_addr.s6_addr16[0],
1462 sa6->sin6_addr.s6_addr16[1],
1463 sa6->sin6_addr.s6_addr16[2],
1464 sa6->sin6_addr.s6_addr16[3],
1465 sa6->sin6_addr.s6_addr16[4],
1466 sa6->sin6_addr.s6_addr16[5],
1467 sa6->sin6_addr.s6_addr16[6],
1468 sa6->sin6_addr.s6_addr16[7]
1470 break;
1471 default:
1472 kprintf("AF%d ", sa->sa_family);
1473 while (len > 0 && sa->sa_data[len-1] == 0)
1474 --len;
1476 for (i = 0; i < len; ++i) {
1477 if (i)
1478 kprintf(".");
1479 kprintf("%d", (unsigned char)sa->sa_data[i]);
1481 break;
1486 #endif
1489 * Set up a routing table entry, normally for an interface.
1492 rtinit(struct ifaddr *ifa, int cmd, int flags)
1494 struct sockaddr *dst, *deldst, *netmask;
1495 struct mbuf *m = NULL;
1496 struct radix_node_head *rnh;
1497 struct radix_node *rn;
1498 struct rt_addrinfo rtinfo;
1499 int error;
1501 if (flags & RTF_HOST) {
1502 dst = ifa->ifa_dstaddr;
1503 netmask = NULL;
1504 } else {
1505 dst = ifa->ifa_addr;
1506 netmask = ifa->ifa_netmask;
1509 * If it's a delete, check that if it exists, it's on the correct
1510 * interface or we might scrub a route to another ifa which would
1511 * be confusing at best and possibly worse.
1513 if (cmd == RTM_DELETE) {
1515 * It's a delete, so it should already exist..
1516 * If it's a net, mask off the host bits
1517 * (Assuming we have a mask)
1519 if (netmask != NULL) {
1520 m = m_get(MB_DONTWAIT, MT_SONAME);
1521 if (m == NULL)
1522 return (ENOBUFS);
1523 mbuftrackid(m, 34);
1524 deldst = mtod(m, struct sockaddr *);
1525 rt_maskedcopy(dst, deldst, netmask);
1526 dst = deldst;
1529 * Look up an rtentry that is in the routing tree and
1530 * contains the correct info.
1532 if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1533 (rn = rnh->rnh_lookup((char *)dst,
1534 (char *)netmask, rnh)) == NULL ||
1535 ((struct rtentry *)rn)->rt_ifa != ifa ||
1536 !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1537 if (m != NULL)
1538 m_free(m);
1539 return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1541 /* XXX */
1542 #if 0
1543 else {
1545 * One would think that as we are deleting, and we know
1546 * it doesn't exist, we could just return at this point
1547 * with an "ELSE" clause, but apparently not..
1549 return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1551 #endif
1554 * Do the actual request
1556 bzero(&rtinfo, sizeof(struct rt_addrinfo));
1557 rtinfo.rti_info[RTAX_DST] = dst;
1558 rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1559 rtinfo.rti_info[RTAX_NETMASK] = netmask;
1560 rtinfo.rti_flags = flags | ifa->ifa_flags;
1561 rtinfo.rti_ifa = ifa;
1562 error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa,
1563 RTREQ_PRIO_HIGH);
1564 if (m != NULL)
1565 m_free(m);
1566 return (error);
1569 static void
1570 rtinit_rtrequest_callback(int cmd, int error,
1571 struct rt_addrinfo *rtinfo, struct rtentry *rt,
1572 void *arg)
1574 struct ifaddr *ifa = arg;
1576 if (error == 0 && rt) {
1577 if (mycpuid == 0) {
1578 ++rt->rt_refcnt;
1579 rt_newaddrmsg(cmd, ifa, error, rt);
1580 --rt->rt_refcnt;
1582 if (cmd == RTM_DELETE) {
1583 if (rt->rt_refcnt == 0) {
1584 ++rt->rt_refcnt;
1585 rtfree(rt);
1591 struct netmsg_rts {
1592 struct netmsg_base base;
1593 int req;
1594 struct rt_addrinfo *rtinfo;
1595 rtsearch_callback_func_t callback;
1596 void *arg;
1597 boolean_t exact_match;
1598 int found_cnt;
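/*
 * Search every cpu's routing table for the route described by rtinfo and
 * invoke the callback on each cpu where a match is found.  If exact_match
 * is requested, a host lookup must return a host route (and vice versa).
 * ESRCH is returned when no cpu has the route.
 */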
1602 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1603 rtsearch_callback_func_t callback, void *arg, boolean_t exact_match,
1604 boolean_t req_prio)
1606 struct netmsg_rts msg;
1607 int flags = 0;
1609 if (req_prio)
1610 flags = MSGF_PRIORITY;
1611 netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
1612 rtsearch_msghandler);
1613 msg.req = req;
1614 msg.rtinfo = rtinfo;
1615 msg.callback = callback;
1616 msg.arg = arg;
1617 msg.exact_match = exact_match;
1618 msg.found_cnt = 0;
1619 return rt_domsg_global(&msg.base);
1622 static void
1623 rtsearch_msghandler(netmsg_t msg)
1625 struct netmsg_rts *rmsg = (void *)msg;
1626 struct rt_addrinfo rtinfo;
1627 struct radix_node_head *rnh;
1628 struct rtentry *rt;
1629 int nextcpu, error;
1632 * Copy the rtinfo. We need to make sure that the original
1633 * rtinfo, which is set up by the caller in the netmsg, will
1634 * _not_ be changed; else the next CPU on the netmsg forwarding
1635 * path will see a different rtinfo than what this CPU has seen.
1637 rtinfo = *rmsg->rtinfo;
1640 * Find the correct routing tree to use for this Address Family
1642 if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1643 if (mycpuid != 0)
1644 panic("partially initialized routing tables");
1645 lwkt_replymsg(&rmsg->base.lmsg, EAFNOSUPPORT);
1646 return;
1650 * Correct rtinfo for the host route searching.
1652 if (rtinfo.rti_flags & RTF_HOST) {
1653 rtinfo.rti_netmask = NULL;
1654 rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1657 rt = (struct rtentry *)
1658 rnh->rnh_lookup((char *)rtinfo.rti_dst,
1659 (char *)rtinfo.rti_netmask, rnh);
1662 * If we are asked to do the "exact match", we need to make sure
1663 * that host route searching got a host route while a network
1664 * route searching got a network route.
1666 if (rt != NULL && rmsg->exact_match &&
1667 ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1668 rt = NULL;
1670 if (rt == NULL) {
1672 * No matching routes have been found, don't count this
1673 * as a critical error (here, we set 'error' to 0), just
1674 * keep moving on, since at least prcloned routes are not
1675 * duplicated onto each CPU.
1677 error = 0;
1678 } else {
1679 rmsg->found_cnt++;
1681 rt->rt_refcnt++;
1682 error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
1683 rmsg->found_cnt);
1684 rt->rt_refcnt--;
1686 if (error == EJUSTRETURN) {
1687 lwkt_replymsg(&rmsg->base.lmsg, 0);
1688 return;
1692 nextcpu = mycpuid + 1;
1693 if (error) {
1694 KKASSERT(rmsg->found_cnt > 0);
1697 * Under the following cases, an unrecoverable error has
1698 * not occurred:
1699 * o Request is RTM_GET
1700 * o The first time that we find the route, but the
1701 * modification fails.
1703 if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
1704 panic("rtsearch_msghandler: unrecoverable error "
1705 "cpu %d", mycpuid);
1707 lwkt_replymsg(&rmsg->base.lmsg, error);
1708 } else if (nextcpu < ncpus) {
1709 lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
1710 } else {
1711 if (rmsg->found_cnt == 0) {
1712 /* The requested route was never seen ... */
1713 error = ESRCH;
1715 lwkt_replymsg(&rmsg->base.lmsg, error);
1720 rtmask_add_global(struct sockaddr *mask, boolean_t req_prio)
1722 struct netmsg_base msg;
1723 int flags = 0;
1725 if (req_prio)
1726 flags = MSGF_PRIORITY;
1727 netmsg_init(&msg, NULL, &curthread->td_msgport, flags,
1728 rtmask_add_msghandler);
1729 msg.lmsg.u.ms_resultp = mask;
1731 return rt_domsg_global(&msg);
1734 struct sockaddr *
1735 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1737 struct radix_node *n;
1739 #define clen(s) (*(u_char *)(s))
1740 n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
1741 if (n != NULL &&
1742 mask->sa_len >= clen(n->rn_key) &&
1743 bcmp((char *)mask + 1,
1744 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1745 return (struct sockaddr *)n->rn_key;
1746 } else {
1747 return NULL;
1749 #undef clen
1752 static void
1753 rtmask_add_msghandler(netmsg_t msg)
1755 struct lwkt_msg *lmsg = &msg->lmsg;
1756 struct sockaddr *mask = lmsg->u.ms_resultp;
1757 int error = 0, nextcpu;
1759 if (rtmask_lookup(mask) == NULL)
1760 error = ENOBUFS;
1762 nextcpu = mycpuid + 1;
1763 if (!error && nextcpu < ncpus)
1764 lwkt_forwardmsg(netisr_cpuport(nextcpu), lmsg);
1765 else
1766 lwkt_replymsg(lmsg, error);
1769 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1770 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1772 struct rtchange_arg {
1773 struct ifaddr *old_ifa;
1774 struct ifaddr *new_ifa;
1775 struct rtentry *rt;
1776 int changed;
1779 static void
1780 rtchange_ifa(struct rtentry *rt, struct rtchange_arg *ap)
1782 if (rt->rt_ifa->ifa_rtrequest != NULL)
1783 rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt);
1784 IFAFREE(rt->rt_ifa);
1786 IFAREF(ap->new_ifa);
1787 rt->rt_ifa = ap->new_ifa;
1788 rt->rt_ifp = ap->new_ifa->ifa_ifp;
1789 if (rt->rt_ifa->ifa_rtrequest != NULL)
1790 rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);
1792 ap->changed = 1;
1795 static int
1796 rtchange_callback(struct radix_node *rn, void *xap)
1798 struct rtchange_arg *ap = xap;
1799 struct rtentry *rt = (struct rtentry *)rn;
1801 if (rt->rt_ifa == ap->old_ifa) {
1802 if (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
1804 * We could saw off the branch we are
1805 * still sitting on if the ifa_rtrequest
1806 * DEL/ADD were called directly from here.
1808 ap->rt = rt;
1809 return EJUSTRETURN;
1811 rtchange_ifa(rt, ap);
1813 return 0;
1816 struct netmsg_rtchange {
1817 struct netmsg_base base;
1818 struct ifaddr *old_ifa;
1819 struct ifaddr *new_ifa;
1820 int changed;
1823 static void
1824 rtchange_dispatch(netmsg_t msg)
1826 struct netmsg_rtchange *rmsg = (void *)msg;
1827 struct radix_node_head *rnh;
1828 struct rtchange_arg arg;
1829 int nextcpu, cpu;
1831 cpu = mycpuid;
1833 memset(&arg, 0, sizeof(arg));
1834 arg.old_ifa = rmsg->old_ifa;
1835 arg.new_ifa = rmsg->new_ifa;
1837 rnh = rt_tables[cpu][AF_INET];
1838 for (;;) {
1839 int error;
1841 KKASSERT(arg.rt == NULL);
1842 error = rnh->rnh_walktree(rnh, rtchange_callback, &arg);
1843 if (arg.rt != NULL) {
1844 struct rtentry *rt;
1846 rt = arg.rt;
1847 arg.rt = NULL;
1848 rtchange_ifa(rt, &arg);
1849 } else {
1850 break;
1853 if (arg.changed)
1854 rmsg->changed = 1;
1856 nextcpu = cpu + 1;
1857 if (nextcpu < ncpus)
1858 lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
1859 else
1860 lwkt_replymsg(&rmsg->base.lmsg, 0);
1864 rtchange(struct ifaddr *old_ifa, struct ifaddr *new_ifa)
1866 struct netmsg_rtchange msg;
1869 * XXX individual requests are not independently chained,
1870 * which means that the per-cpu route tables will not be
1871 * consistent in the middle of the operation. If routes
1872 * related to the interface are manipulated while we are
1873 * doing this, the inconsistency could trigger a panic.
1875 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
1876 rtchange_dispatch);
1877 msg.old_ifa = old_ifa;
1878 msg.new_ifa = new_ifa;
1879 msg.changed = 0;
1880 rt_domsg_global(&msg.base);
1882 if (msg.changed) {
1883 old_ifa->ifa_flags &= ~IFA_ROUTE;
1884 new_ifa->ifa_flags |= IFA_ROUTE;
1885 return 0;
1886 } else {
1887 return ENOENT;
1892 rt_domsg_global(struct netmsg_base *nmsg)
1894 ASSERT_CANDOMSG_NETISR0(curthread);
1895 return lwkt_domsg(netisr_cpuport(0), &nmsg->lmsg, 0);