/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3 (Berkeley) 1/9/95
 * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/globaldata.h>
#include <sys/thread.h>

#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <net/ip_mroute/ip_mroute.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <netproto/mpls/mpls.h>
static struct rtstatistics rtstatistics_percpu[MAXCPU] __cachealign;
#define rtstat  rtstatistics_percpu[mycpuid]

struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
static void rt_maskedcopy(struct sockaddr *, struct sockaddr *,
    struct sockaddr *);
static void rtable_init(void);
static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
    struct rtentry *, void *);
static void rtredirect_msghandler(netmsg_t msg);
static void rtrequest1_msghandler(netmsg_t msg);
static void rtsearch_msghandler(netmsg_t msg);
static void rtmask_add_msghandler(netmsg_t msg);

static int rt_setshims(struct rtentry *, struct sockaddr **);

SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");

static int route_debug = 1;
SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
    &route_debug, 0, "");

u_long route_kmalloc_limit = 0;
TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
/*
 * Initialize the route table(s) for protocol domains and
 * create a helper thread which will be responsible for updating
 * route table entries on each cpu.
 */
    if (route_kmalloc_limit)
        kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);

    for (cpu = 0; cpu < netisr_ncpus; ++cpu)
        bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));

    rn_init();      /* initialize all zeroes, all ones, mask table */
    rtable_init();  /* call dom_rtattach() on each cpu */
rtable_init_oncpu(netmsg_t msg)
    ASSERT_NETISR_NCPUS(cpu);

    SLIST_FOREACH(dom, &domains, dom_next) {
        if (dom->dom_rtattach) {
            (void **)&rt_tables[cpu][dom->dom_family],

    netisr_forwardmsg(&msg->base, cpu + 1);

    struct netmsg_base msg;

    netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
    netisr_domsg_global(&msg);
/*
 * Routing statistics.
 */
sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
    for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
        if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
            sizeof(struct rtstatistics))))
        if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
            sizeof(struct rtstatistics))))

SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
    0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
/*
 * Packet routing routines.
 */

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * always clone routes.
 */
rtalloc(struct route *ro)
    rtalloc_ign(ro, 0UL);

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
 * ignored.
 */
rtalloc_ign(struct route *ro, u_long ignoreflags)
    if (ro->ro_rt != NULL) {
        if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)

    ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
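/*
 * Minimal usage sketch (illustrative only, not part of this file): a caller
 * embeds a struct route, fills in ro_dst and lets the lookup populate ro_rt:
 *
 *    struct route ro;
 *
 *    bzero(&ro, sizeof(ro));
 *    ... fill in ro.ro_dst with the destination sockaddr ...
 *    rtalloc_ign(&ro, RTF_PRCLONING);
 *    if (ro.ro_rt == NULL)
 *        ... no route; an RTM_MISS report has already been generated ...
 */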
/*
 * Look up the route that matches the given "dst" address.
 *
 * Route lookup can have the side-effect of creating and returning
 * a cloned route instead when "dst" matches a cloning route and the
 * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
 *
 * Any route returned has its reference count incremented.
 */
_rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
    struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];

    ASSERT_NETISR_NCPUS(mycpuid);

    /*
     * Look up route in the radix tree.
     */
    rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);

    /*
     * Handle cloning routes.
     */
    if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
        struct rtentry *clonedroute;

        clonedroute = rt;       /* copy in/copy out parameter */
        error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
            &clonedroute);      /* clone the route */
        if (error != 0) {       /* cloning failed */
            rt_dstmsg(RTM_MISS, dst, error);
            return (rt);        /* return the uncloned route */
        if (generate_report) {
            if (clonedroute->rt_flags & RTF_XRESOLVE)
                rt_dstmsg(RTM_RESOLVE, dst, 0);
                rt_rtmsg(RTM_ADD, clonedroute,
                    clonedroute->rt_ifp, 0);
        return (clonedroute);   /* return cloned route */

    /*
     * Increment the reference count of the matched route and return.
     */
    rtstat.rts_unreach++;
    rt_dstmsg(RTM_MISS, dst, 0);
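/*
 * Note on the reference counting contract: every route handed back by
 * _rtlookup()/rtlookup()/rtpurelookup() has rt_refcnt already bumped, so a
 * typical caller pairs the lookup with rtfree()/RTFREE() once it is done
 * with the entry, e.g. (illustrative only, not taken from this file):
 *
 *    rt = rtlookup(dst);
 *    if (rt != NULL) {
 *        ... use rt ...
 *        RTFREE(rt);
 *    }
 */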
rtfree(struct rtentry *rt)
    ASSERT_NETISR_NCPUS(rt->rt_cpuid);
    KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));

    if (rt->rt_refcnt == 0) {
        struct radix_node_head *rnh =
            rt_tables[mycpuid][rt_key(rt)->sa_family];

        rnh->rnh_close((struct radix_node *)rt, rnh);
        if (!(rt->rt_flags & RTF_UP)) {
            /* deallocate route */
            if (rt->rt_ifa != NULL)
            if (rt->rt_parent != NULL)
                RTFREE(rt->rt_parent);  /* recursive call! */
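/*
 * Asynchronous free path (descriptive note): rtfree() may only run in the
 * netisr thread that owns rt->rt_cpuid, so code that drops its last
 * reference from some other cpu hands the rtentry to the owner cpu as a
 * netmsg via rtfree_async() and the release is completed there.
 */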
rtfree_async_dispatch(netmsg_t msg)
    struct rtentry *rt = msg->lmsg.u.ms_resultp;

    netisr_replymsg(&msg->base, 0);

rtfree_async(struct rtentry *rt)
    struct netmsg_base *msg;

    if (IN_NETISR_NCPUS(rt->rt_cpuid)) {

    KASSERT(rt->rt_refcnt > 0,
        ("rtfree_async: rt_refcnt %ld", rt->rt_refcnt));

    msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
    netmsg_init(msg, NULL, &netisr_afree_rport, 0, rtfree_async_dispatch);
    msg->lmsg.u.ms_resultp = rt;

    netisr_sendmsg(msg, rt->rt_cpuid);
rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *netmask, int flags, struct sockaddr *src)
    struct rtentry *rt = NULL;
    struct rt_addrinfo rtinfo;

    ASSERT_NETISR_NCPUS(mycpuid);

    /* verify the gateway is directly reachable */
    if ((ifa = ifa_ifwithnet(gateway)) == NULL) {

    /*
     * If the redirect isn't from our current router for this destination,
     * it's either old or wrong.
     */
    if (!(flags & RTF_DONE) &&          /* XXX JH */
        (rt = rtpurelookup(dst)) != NULL &&
        (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {

    /*
     * If it redirects us to ourselves, we have a routing loop,
     * perhaps as a result of an interface going down recently.
     */
    if (ifa_ifwithaddr(gateway)) {
        error = EHOSTUNREACH;

    /*
     * Create a new entry if the lookup failed or if we got back
     * a wildcard entry for the default route.  This is necessary
     * for hosts which use routing redirects generated by smart
     * gateways to dynamically build the routing tables.
     */
    if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {

    /* Ignore redirects for directly connected hosts. */
    if (!(rt->rt_flags & RTF_GATEWAY)) {
        error = EHOSTUNREACH;

    if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
        /*
         * Changing from a network route to a host route.
         * Create a new host route rather than smashing the
         * network route.
         */
        flags |= RTF_GATEWAY | RTF_DYNAMIC;
        bzero(&rtinfo, sizeof(struct rt_addrinfo));
        rtinfo.rti_info[RTAX_DST] = dst;
        rtinfo.rti_info[RTAX_GATEWAY] = gateway;
        rtinfo.rti_info[RTAX_NETMASK] = netmask;
        rtinfo.rti_flags = flags;
        rtinfo.rti_ifa = ifa;
        rt = NULL;      /* copy-in/copy-out parameter */
        error = rtrequest1(RTM_ADD, &rtinfo, &rt);
        flags = rt->rt_flags;
        stat = &rtstat.rts_dynamic;

        /*
         * Smash the current notion of the gateway to this destination.
         * Should check about netmask!!!
         */
        rt->rt_flags |= RTF_MODIFIED;
        flags |= RTF_MODIFIED;

        /* We only need to report rtmsg on CPU0 */
        rt_setgate(rt, rt_key(rt), gateway,
            mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
        stat = &rtstat.rts_newgateway;

        rtstat.rts_badredirect++;
    else if (stat != NULL)
struct netmsg_rtredirect {
    struct netmsg_base base;
    struct sockaddr *dst;
    struct sockaddr *gateway;
    struct sockaddr *netmask;
    int flags;
    struct sockaddr *src;
};
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 */
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *netmask, int flags, struct sockaddr *src)
    struct rt_addrinfo rtinfo;
    struct netmsg_rtredirect msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport,
        0, rtredirect_msghandler);
    msg.gateway = gateway;
    msg.netmask = netmask;
    error = netisr_domsg_global(&msg.base);

    bzero(&rtinfo, sizeof(struct rt_addrinfo));
    rtinfo.rti_info[RTAX_DST] = dst;
    rtinfo.rti_info[RTAX_GATEWAY] = gateway;
    rtinfo.rti_info[RTAX_NETMASK] = netmask;
    rtinfo.rti_info[RTAX_AUTHOR] = src;
    rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
rtredirect_msghandler(netmsg_t msg)
    struct netmsg_rtredirect *rmsg = (void *)msg;

    rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
        rmsg->flags, rmsg->src);
    netisr_forwardmsg(&msg->base, mycpuid + 1);
/*
 * Routing table ioctl interface.
 */
rtioctl(u_long req, caddr_t data, struct ucred *cred)
    /* Multicast goop, grrr... */
    return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
    if (!(flags & RTF_GATEWAY)) {
        /*
         * If we are adding a route to an interface,
         * and the interface is a point-to-point link,
         * we should search for the destination
         * as our clue to the interface.  Otherwise
         * we can use the local address.
         */
        if (flags & RTF_HOST) {
            ifa = ifa_ifwithdstaddr(dst);
            ifa = ifa_ifwithaddr(gateway);
        /*
         * If we are adding a route to a remote net
         * or host, the gateway may still be on the
         * other end of a pt to pt link.
         */
        ifa = ifa_ifwithdstaddr(gateway);
        ifa = ifa_ifwithnet(gateway);
        rt = rtpurelookup(gateway);
        if ((ifa = rt->rt_ifa) == NULL)

    if (ifa->ifa_addr->sa_family != dst->sa_family) {
        struct ifaddr *oldifa = ifa;

        ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
static int rt_fixdelete (struct radix_node *, void *);
static int rt_fixchange (struct radix_node *, void *);

struct rtfc_arg {
    struct rtentry *rt0;
    struct radix_node_head *rnh;
};
/*
 * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
 */
rt_getifa(struct rt_addrinfo *rtinfo)
    struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
    struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
    struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
    int flags = rtinfo->rti_flags;

    /*
     * ifp may be specified by sockaddr_dl
     * when protocol address is ambiguous.
     */
    if (rtinfo->rti_ifp == NULL) {
        struct sockaddr *ifpaddr;

        ifpaddr = rtinfo->rti_info[RTAX_IFP];
        if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
            ifa = ifa_ifwithnet(ifpaddr);
                rtinfo->rti_ifp = ifa->ifa_ifp;

    if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
        rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
    if (rtinfo->rti_ifa == NULL) {
        sa = ifaaddr != NULL ? ifaaddr :
            (gateway != NULL ? gateway : dst);
        if (sa != NULL && rtinfo->rti_ifp != NULL)
            rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
        else if (dst != NULL && gateway != NULL)
            rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
            rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
    if (rtinfo->rti_ifa == NULL)
        return (ENETUNREACH);

    if (rtinfo->rti_ifp == NULL)
        rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
/*
 * Do appropriate manipulations of a routing tree given
 * all the bits of info needed
 */
    struct sockaddr *dst,
    struct sockaddr *gateway,
    struct sockaddr *netmask,
    struct rtentry **ret_nrt)
    struct rt_addrinfo rtinfo;

    bzero(&rtinfo, sizeof(struct rt_addrinfo));
    rtinfo.rti_info[RTAX_DST] = dst;
    rtinfo.rti_info[RTAX_GATEWAY] = gateway;
    rtinfo.rti_info[RTAX_NETMASK] = netmask;
    rtinfo.rti_flags = flags;
    return rtrequest1(req, &rtinfo, ret_nrt);
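/*
 * Rough usage sketch (an assumption for illustration, not taken from this
 * file): a netisr-context caller adding a gatewayed host route might do
 *
 *    struct rtentry *rt = NULL;
 *
 *    error = rtrequest(RTM_ADD, dst, gateway, NULL,
 *        RTF_HOST | RTF_GATEWAY, &rt);
 *    if (error == 0 && rt != NULL)
 *        rtfree(rt);
 *
 * where the final rtfree() drops the extra reference handed back through
 * ret_nrt.  Callers not bound to a single netisr cpu go through
 * rtrequest_global()/rtrequest1_global() instead, so the request is
 * replayed on every cpu's table.
 */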
    struct sockaddr *dst,
    struct sockaddr *gateway,
    struct sockaddr *netmask,
    struct rt_addrinfo rtinfo;

    bzero(&rtinfo, sizeof(struct rt_addrinfo));
    rtinfo.rti_info[RTAX_DST] = dst;
    rtinfo.rti_info[RTAX_GATEWAY] = gateway;
    rtinfo.rti_info[RTAX_NETMASK] = netmask;
    rtinfo.rti_flags = flags;
    return rtrequest1_global(req, &rtinfo, NULL, NULL, RTREQ_PRIO_NORM);

    struct netmsg_base base;
    struct rt_addrinfo *rtinfo;
    rtrequest1_callback_func_t callback;
rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
    rtrequest1_callback_func_t callback, void *arg, boolean_t req_prio)
    struct netmsg_rtq msg;

        flags = MSGF_PRIORITY;
    netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
        rtrequest1_msghandler);
    msg.base.lmsg.ms_error = -1;
    msg.callback = callback;
    return (netisr_domsg_global(&msg.base));
/*
 * Handle a route table request on the current cpu.  Since the route tables
 * are supposed to be identical on each cpu, an error occurring later in the
 * message chain is considered system-fatal.
 */
rtrequest1_msghandler(netmsg_t msg)
    struct netmsg_rtq *rmsg = (void *)msg;
    struct rt_addrinfo rtinfo;
    struct rtentry *rt = NULL;

    /*
     * Copy the rtinfo.  We need to make sure that the original
     * rtinfo, which is set up by the caller, in the netmsg will
     * _not_ be changed; else the next CPU on the netmsg forwarding
     * path will see a different rtinfo than what this CPU has seen.
     */
    rtinfo = *rmsg->rtinfo;
    error = rtrequest1(rmsg->req, &rtinfo, &rt);

        rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);

    /*
     * RTM_DELETEs are propagated even if an error occurs, since a
     * cloned route might be undergoing deletion and cloned routes
     * are not necessarily replicated.  An overall error is returned
     * only if no cpus have the route in question.
     */
    if (rmsg->base.lmsg.ms_error < 0 || error == 0)
        rmsg->base.lmsg.ms_error = error;

    if (error && rmsg->req != RTM_DELETE) {
        panic("rtrequest1_msghandler: rtrequest table req %d, "
            "failed on cpu%d, error %d\n",
            rmsg->req, mycpuid, error);
    netisr_replymsg(&rmsg->base, error);
    netisr_forwardmsg_error(&rmsg->base, mycpuid + 1,
        rmsg->base.lmsg.ms_error);
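/*
 * Message flow sketch (an assumption drawn from the netisr helpers used
 * here, illustrative only):
 *
 *    rtrequest1_global()
 *        -> netisr_domsg_global(&msg.base)      dispatch to cpu0
 *           -> rtrequest1_msghandler()          runs rtrequest1() locally
 *              -> netisr_forwardmsg_error()     forward to cpu1, cpu2, ...
 *              -> netisr_replymsg()             reply on last cpu or fatal error
 *
 * Each cpu applies the request to its own rt_tables[] replica, which is why
 * a failure after the first hop (other than RTM_DELETE) is treated as fatal.
 */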
rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
    struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
    struct radix_node *rn;
    struct radix_node_head *rnh;
    struct sockaddr *ndst;

    ASSERT_NETISR_NCPUS(mycpuid);

#define gotoerr(x) { error = x ; goto bad; }

        rt_addrinfo_print(req, rtinfo);

    /*
     * Find the correct routing tree to use for this Address Family
     */
    if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
        gotoerr(EAFNOSUPPORT);

    /*
     * If we are adding a host route then we don't want to put
     * a netmask in the tree, nor do we want to clone it.
     */
    if (rtinfo->rti_flags & RTF_HOST) {
        rtinfo->rti_info[RTAX_NETMASK] = NULL;
        rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
        /* Remove the item from the tree. */
        rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
            (char *)rtinfo->rti_info[RTAX_NETMASK],
        KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
            ("rnh_deladdr returned flags 0x%x", rn->rn_flags));
        rt = (struct rtentry *)rn;

        /* ref to prevent a deletion race */

        /* Free any routes cloned from this one. */
        if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
            rt_mask(rt) != NULL) {
            rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),

        if (rt->rt_gwroute != NULL) {
            RTFREE(rt->rt_gwroute);
            rt->rt_gwroute = NULL;

        /*
         * NB: RTF_UP must be set during the search above,
         * because we might delete the last ref, causing
         * rt to get freed prematurely.
         */
        rt->rt_flags &= ~RTF_UP;

            rt_print(rtinfo, rt);

        /* Give the protocol a chance to keep things in sync. */
        if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
            ifa->ifa_rtrequest(RTM_DELETE, rt);

        /*
         * If the caller wants it, then it can have it,
         * but it's up to it to free the rtentry as we won't be
         * doing it.
         */
        KASSERT(rt->rt_refcnt >= 0,
            ("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
        if (ret_nrt != NULL) {
            /* leave ref intact for return */

        /* deref / attempt to destroy */
        if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)

        KASSERT(rt->rt_cpuid == mycpuid,
            ("rt resolve rt_cpuid %d, mycpuid %d",
             rt->rt_cpuid, mycpuid));

            rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
        rtinfo->rti_flags |= RTF_WASCLONED;
        rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
        if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
            rtinfo->rti_flags |= RTF_HOST;
        rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
        rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
        rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
        KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
            rtinfo->rti_info[RTAX_GATEWAY] != NULL,
            ("rtrequest: GATEWAY but no gateway"));

        if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
        ifa = rtinfo->rti_ifa;

        R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
            if (req == RTM_ADD) {
                kprintf("rtrequest1: alloc rtentry failed on "
        bzero(rt, sizeof(struct rtentry));
        rt->rt_flags = RTF_UP | rtinfo->rti_flags;
        rt->rt_cpuid = mycpuid;

        if (mycpuid != 0 && req == RTM_ADD) {
            /* For RTM_ADD, we have already sent rtmsg on CPU0. */
            reportmsg = RTL_DONTREPORT;
            /*
             * For RTM_ADD, we only send rtmsg on CPU0.
             * For RTM_RESOLVE, we always send rtmsg. XXX
             */
            reportmsg = RTL_REPORTMSG;
        error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],

        if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
            rt_maskedcopy(dst, ndst,
                rtinfo->rti_info[RTAX_NETMASK]);
            bcopy(dst, ndst, dst->sa_len);

        if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
            rt_setshims(rt, rtinfo->rti_info);

        /*
         * Note that we now have a reference to the ifa.
         * This moved from below so that rnh->rnh_addaddr() can
         * examine the ifa and ifa->ifa_ifp if it so desires.
         */
        rt->rt_ifp = ifa->ifa_ifp;
        /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
        rn = rnh->rnh_addaddr((char *)ndst,
            (char *)rtinfo->rti_info[RTAX_NETMASK],
            struct rtentry *oldrt;

            /*
             * We already have one of these in the tree.
             * We do a special hack: if the old route was
             * cloned, then we blow it away and try
             * re-inserting the new one.
             */
            oldrt = rtpurelookup(ndst);
                if (oldrt->rt_flags & RTF_WASCLONED) {
                    rtrequest(RTM_DELETE, rt_key(oldrt),
                        oldrt->rt_flags, NULL);
                    rn = rnh->rnh_addaddr((char *)ndst,
                        rtinfo->rti_info[RTAX_NETMASK],
            /* NOTE: rt_ifa may have been changed */

            /*
             * If it still failed to go into the tree,
             * then un-make it (this should be a function).
             */
            if (rt->rt_gwroute != NULL)
                rtfree(rt->rt_gwroute);
        /*
         * If we got here from RESOLVE, then we are cloning
         * so clone the rest, and note that we
         * are a clone (and increment the parent's references)
         */
        if (req == RTM_RESOLVE) {
            rt->rt_rmx = (*ret_nrt)->rt_rmx;    /* copy metrics */
            rt->rt_rmx.rmx_pksent = 0;          /* reset packet counter */
            if ((*ret_nrt)->rt_flags &
                (RTF_CLONING | RTF_PRCLONING)) {
                rt->rt_parent = *ret_nrt;
                (*ret_nrt)->rt_refcnt++;

        /*
         * if this protocol has something to add to this then
         * allow it to do that as well.
         */
        if (ifa->ifa_rtrequest != NULL)
            ifa->ifa_rtrequest(req, rt);

        /*
         * We repeat the same procedure from rt_setgate() here because
         * it doesn't fire when we call it there because the node
         * hasn't been added to the tree yet.
         */
        if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
            rt_mask(rt) != NULL) {
            struct rtfc_arg arg = { rt, rnh };

            rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),

            rt_print(rtinfo, rt);

        /*
         * Return the resulting rtentry,
         * increasing the number of references by one.
         */
        if (ret_nrt != NULL) {

        kprintf("rti %p failed error %d\n", rtinfo, error);
        kprintf("rti %p succeeded\n", rtinfo);
/*
 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
 * (i.e., the routes related to it by the operation of cloning).  This
 * routine is iterated over all potential former-child-routes by way of
 * rnh->rnh_walktree_from() above, and those that actually are children of
 * the late parent (passed in as VP here) are themselves deleted.
 */
rt_fixdelete(struct radix_node *rn, void *vp)
    struct rtentry *rt = (struct rtentry *)rn;
    struct rtentry *rt0 = vp;

    if (rt->rt_parent == rt0 &&
        !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
        return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
            rt->rt_flags, NULL);
/*
 * This routine is called from rt_setgate() to do the analogous thing for
 * adds and changes.  There is the added complication in this case of a
 * middle insert; i.e., insertion of a new network route between an older
 * network route and (cloned) host routes.  For this reason, a simple check
 * of rt->rt_parent is insufficient; each candidate route must be tested
 * against the (mask, value) of the new route (passed as before in vp)
 * to see if the new route matches it.
 *
 * XXX - it may be possible to do fixdelete() for changes and reserve this
 * routine just for adds.  I'm not sure why I thought it was necessary to do
 */
static int rtfcdebug = 0;
rt_fixchange(struct radix_node *rn, void *vp)
    struct rtentry *rt = (struct rtentry *)rn;
    struct rtfc_arg *ap = vp;
    struct rtentry *rt0 = ap->rt0;
    struct radix_node_head *rnh = ap->rnh;
    u_char *xk1, *xm1, *xk2, *xmp;

        kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);

    if (rt->rt_parent == NULL ||
        (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
        if (rtfcdebug) kprintf("no parent, pinned or cloning\n");

    if (rt->rt_parent == rt0) {
        if (rtfcdebug) kprintf("parent match\n");
        return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
            rt->rt_flags, NULL);

    /*
     * There probably is a function somewhere which does this...
     * if not, there should be.
     */
    len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);

    xk1 = (u_char *)rt_key(rt0);
    xm1 = (u_char *)rt_mask(rt0);
    xk2 = (u_char *)rt_key(rt);

    /* avoid applying a less specific route */
    xmp = (u_char *)rt_mask(rt->rt_parent);
    mlen = rt_key(rt->rt_parent)->sa_len;
    if (mlen > rt_key(rt0)->sa_len) {
            kprintf("rt_fixchange: inserting a less "
                "specific route\n");

    for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
        if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
                kprintf("rt_fixchange: inserting a less "
                    "specific route\n");

    for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
        if ((xk2[i] & xm1[i]) != xk1[i]) {
            if (rtfcdebug) kprintf("no match\n");

    /*
     * OK, this node is a clone, and matches the node currently being
     * changed/added under the node's mask.  So, get rid of it.
     */
    if (rtfcdebug) kprintf("deleting\n");
    return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
        rt->rt_flags, NULL);
rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
    boolean_t generate_report)
    char *space, *oldspace;
    int dlen = RT_ROUNDUP(dst->sa_len), glen = RT_ROUNDUP(gate->sa_len);
    struct rtentry *rt = rt0;
    struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];

    ASSERT_NETISR_NCPUS(mycpuid);

    /*
     * A host route with the destination equal to the gateway
     * will interfere with keeping LLINFO in the routing
     * table, so disallow it.
     */
    if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
         (RTF_HOST | RTF_GATEWAY)) &&
        dst->sa_len == gate->sa_len &&
        sa_equal(dst, gate)) {
        /*
         * The route might already exist if this is an RTM_CHANGE
         * or a routing redirect, so try to delete it.
         */
        if (rt_key(rt0) != NULL)
            rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
                rt_mask(rt0), rt0->rt_flags, NULL);
        return EADDRNOTAVAIL;

    /*
     * Both dst and gateway are stored in the same malloc'ed chunk
     * (If I ever get my hands on....)
     * if we need to malloc a new chunk, then keep the old one around
     * till we don't need it any more.
     */
    if (rt->rt_gateway == NULL ||
        glen > RT_ROUNDUP(rt->rt_gateway->sa_len)) {
        oldspace = (char *)rt_key(rt);
        R_Malloc(space, char *, dlen + glen);
        rt->rt_nodes->rn_key = space;
        space = (char *)rt_key(rt);     /* Just use the old space. */

    /* Set the gateway value. */
    rt->rt_gateway = (struct sockaddr *)(space + dlen);
    bcopy(gate, rt->rt_gateway, glen);

    if (oldspace != NULL) {
        /*
         * If we allocated a new chunk, preserve the original dst.
         * This way, rt_setgate() really just sets the gate
         * and leaves the dst field alone.
         */
        bcopy(dst, space, dlen);

    /*
     * If there is already a gwroute, it's now almost definitely wrong
     */
    if (rt->rt_gwroute != NULL) {
        RTFREE(rt->rt_gwroute);
        rt->rt_gwroute = NULL;

    if (rt->rt_flags & RTF_GATEWAY) {
        /*
         * Cloning loop avoidance: In the presence of
         * protocol-cloning and bad configuration, it is
         * possible to get stuck in bottomless mutual recursion
         * (rtrequest rt_setgate rtlookup).  We avoid this
         * by not allowing protocol-cloning to operate for
         * gateways (which is probably the correct choice
         * anyway), and avoid the resulting reference loops
         * by disallowing any route to run through itself as
         * a gateway.  This is obviously mandatory when we
         * get rt->rt_output().
         *
         * This breaks TTCP for hosts outside the gateway!  XXX JH
         */
        rt->rt_gwroute = _rtlookup(gate, generate_report,
        if (rt->rt_gwroute == rt) {
            rt->rt_gwroute = NULL;
            return EDQUOT;      /* failure */

    /*
     * This isn't going to do anything useful for host routes, so
     * don't bother.  Also make sure we have a reasonable mask
     * (we don't yet have one during adds).
     */
    if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
        struct rtfc_arg arg = { rt, rnh };

        rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
            (char *)rt_mask(rt),
            rt_fixchange, &arg);
    struct sockaddr *src,
    struct sockaddr *dst,
    struct sockaddr *netmask)
    u_char *cp1 = (u_char *)src;
    u_char *cp2 = (u_char *)dst;
    u_char *cp3 = (u_char *)netmask;
    u_char *cplim = cp2 + *cp3;
    u_char *cplim2 = cp2 + *cp1;

    *cp2++ = *cp1++; *cp2++ = *cp1++;   /* copies sa_len & sa_family */
        *cp2++ = *cp1++ & *cp3++;
        bzero(cp2, cplim2 - cp2);
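/*
 * Worked example (illustrative): for an AF_INET src of 10.1.2.3 and a
 * netmask of 255.255.255.0, the bytes covered by the mask are ANDed into
 * dst, yielding 10.1.2.0, and any remaining bytes up to src->sa_len are
 * zeroed.  The first two bytes (sa_len and sa_family) are copied verbatim.
 */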
rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
    struct rtentry *up_rt, *rt;

    ASSERT_NETISR_NCPUS(mycpuid);

    if (!(rt0->rt_flags & RTF_UP)) {
        up_rt = rtlookup(dst);
            return (EHOSTUNREACH);
    if (up_rt->rt_flags & RTF_GATEWAY) {
        if (up_rt->rt_gwroute == NULL) {
            up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
            if (up_rt->rt_gwroute == NULL)
                return (EHOSTUNREACH);
        } else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
            rtfree(up_rt->rt_gwroute);
            up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
            if (up_rt->rt_gwroute == NULL)
                return (EHOSTUNREACH);
        rt = up_rt->rt_gwroute;
    if (rt->rt_flags & RTF_REJECT &&
        (rt->rt_rmx.rmx_expire == 0 ||          /* rt doesn't expire */
         time_uptime < rt->rt_rmx.rmx_expire))  /* rt not expired */
        return (rt->rt_flags & RTF_HOST ? EHOSTDOWN : EHOSTUNREACH);
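/*
 * Copy up to three MPLS "shim" sockaddrs (RTAX_MPLS1..RTAX_MPLS3) from the
 * rt_addrinfo array into rt->rt_shim[0..2]; each one is duplicated into its
 * own R_Malloc'ed buffer.
 */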
rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
    for (i = 0; i < 3; i++) {
        struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];

        shimlen = RT_ROUNDUP(shim->sa_len);
        R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
        bcopy(shim, rt->rt_shim[i], shimlen);
/*
 * Print out a route table entry
 */
rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
    kprintf("rti %p cpu %d route %p flags %08lx: ",
        rtinfo, mycpuid, rn, rn->rt_flags);
    sockaddr_print(rt_key(rn));
    sockaddr_print(rt_mask(rn));
    sockaddr_print(rn->rt_gateway);
    kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
    kprintf(" ifa %p\n", rn->rt_ifa);

rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
    if (cmd == RTM_DELETE && route_debug > 1)
        print_backtrace(-1);

    kprintf("C%02d ", cmd);
    kprintf("rti %p cpu %d ", rti, mycpuid);
    for (i = 0; i < rti->rti_addrs; ++i) {
        if (rti->rti_info[i] == NULL)
            kprintf("(?%02d ", i);
        sockaddr_print(rti->rti_info[i]);
sockaddr_print(struct sockaddr *sa)
    struct sockaddr_in *sa4;
    struct sockaddr_in6 *sa6;

    len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);

    switch (sa->sa_family) {
        switch (sa->sa_family) {
            sa4 = (struct sockaddr_in *)sa;
            kprintf("INET %d %d.%d.%d.%d",
                ntohs(sa4->sin_port),
                (ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
                (ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
                (ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
                (ntohl(sa4->sin_addr.s_addr) >> 0) & 255
            sa6 = (struct sockaddr_in6 *)sa;
            kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
                ntohs(sa6->sin6_port),
                sa6->sin6_addr.s6_addr16[0],
                sa6->sin6_addr.s6_addr16[1],
                sa6->sin6_addr.s6_addr16[2],
                sa6->sin6_addr.s6_addr16[3],
                sa6->sin6_addr.s6_addr16[4],
                sa6->sin6_addr.s6_addr16[5],
                sa6->sin6_addr.s6_addr16[6],
                sa6->sin6_addr.s6_addr16[7]
        kprintf("AF%d ", sa->sa_family);
        while (len > 0 && sa->sa_data[len - 1] == 0)
        for (i = 0; i < len; ++i) {
            kprintf("%d", (unsigned char)sa->sa_data[i]);
/*
 * Set up a routing table entry, normally for an interface.
 */
rtinit(struct ifaddr *ifa, int cmd, int flags)
    struct sockaddr *dst, *deldst, *netmask;
    struct mbuf *m = NULL;
    struct radix_node_head *rnh;
    struct radix_node *rn;
    struct rt_addrinfo rtinfo;

    if (flags & RTF_HOST) {
        dst = ifa->ifa_dstaddr;
        dst = ifa->ifa_addr;
        netmask = ifa->ifa_netmask;

    /*
     * If it's a delete, check that if it exists, it's on the correct
     * interface or we might scrub a route to another ifa which would
     * be confusing at best and possibly worse.
     */
    if (cmd == RTM_DELETE) {
        /*
         * It's a delete, so it should already exist.
         * If it's a net, mask off the host bits
         * (Assuming we have a mask)
         */
        if (netmask != NULL) {
            m = m_get(M_NOWAIT, MT_SONAME);
            deldst = mtod(m, struct sockaddr *);
            rt_maskedcopy(dst, deldst, netmask);

        /*
         * Look up an rtentry that is in the routing tree and
         * contains the correct info.
         */
        if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
            (rn = rnh->rnh_lookup((char *)dst,
                (char *)netmask, rnh)) == NULL ||
            ((struct rtentry *)rn)->rt_ifa != ifa ||
            !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
            return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);

            /*
             * One would think that as we are deleting, and we know
             * it doesn't exist, we could just return at this point
             * with an "ELSE" clause, but apparently not.
             */
            return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);

    /*
     * Do the actual request
     */
    bzero(&rtinfo, sizeof(struct rt_addrinfo));
    rtinfo.rti_info[RTAX_DST] = dst;
    rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
    rtinfo.rti_info[RTAX_NETMASK] = netmask;
    rtinfo.rti_flags = flags | ifa->ifa_flags;
    rtinfo.rti_ifa = ifa;
    error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa,
rtinit_rtrequest_callback(int cmd, int error,
    struct rt_addrinfo *rtinfo, struct rtentry *rt,
    struct ifaddr *ifa = arg;

    if (error == 0 && rt) {
            rt_newaddrmsg(cmd, ifa, error, rt);
        if (cmd == RTM_DELETE) {
            if (rt->rt_refcnt == 0) {
    struct netmsg_base base;
    struct rt_addrinfo *rtinfo;
    rtsearch_callback_func_t callback;
    boolean_t exact_match;

rtsearch_global(int req, struct rt_addrinfo *rtinfo,
    rtsearch_callback_func_t callback, void *arg, boolean_t exact_match,
    struct netmsg_rts msg;

        flags = MSGF_PRIORITY;
    netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
        rtsearch_msghandler);
    msg.rtinfo = rtinfo;
    msg.callback = callback;
    msg.exact_match = exact_match;
    return (netisr_domsg_global(&msg.base));
rtsearch_msghandler(netmsg_t msg)
    struct netmsg_rts *rmsg = (void *)msg;
    struct rt_addrinfo rtinfo;
    struct radix_node_head *rnh;

    ASSERT_NETISR_NCPUS(mycpuid);

    /*
     * Copy the rtinfo.  We need to make sure that the original
     * rtinfo, which is set up by the caller, in the netmsg will
     * _not_ be changed; else the next CPU on the netmsg forwarding
     * path will see a different rtinfo than what this CPU has seen.
     */
    rtinfo = *rmsg->rtinfo;

    /*
     * Find the correct routing tree to use for this Address Family
     */
    if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
            panic("partially initialized routing tables");
        netisr_replymsg(&rmsg->base, EAFNOSUPPORT);

    /*
     * Correct rtinfo for the host route searching.
     */
    if (rtinfo.rti_flags & RTF_HOST) {
        rtinfo.rti_netmask = NULL;
        rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);

    rt = (struct rtentry *)
         rnh->rnh_lookup((char *)rtinfo.rti_dst,
            (char *)rtinfo.rti_netmask, rnh);

    /*
     * If we are asked to do the "exact match", we need to make sure
     * that host route searching got a host route while a network
     * route searching got a network route.
     */
    if (rt != NULL && rmsg->exact_match &&
        ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))

        /*
         * No matching routes have been found, don't count this
         * as a critical error (here, we set 'error' to 0), just
         * keep moving on, since at least prcloned routes are not
         * duplicated onto each CPU.
         */

        error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,

    if (error == EJUSTRETURN) {
        netisr_replymsg(&rmsg->base, 0);

        KKASSERT(rmsg->found_cnt > 0);

        /*
         * Under the following cases, unrecoverable error has
         * not occurred:
         * o Request is RTM_GET
         * o The first time that we find the route, but the
         *   modification fails.
         */
        if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
            panic("rtsearch_msghandler: unrecoverable error "
        netisr_replymsg(&rmsg->base, error);

    if (rmsg->found_cnt == 0) {
        /* The requested route has not been seen ... */
    netisr_forwardmsg_error(&rmsg->base, mycpuid + 1, error);
rtmask_add_global(struct sockaddr *mask, boolean_t req_prio)
    struct netmsg_base msg;

        flags = MSGF_PRIORITY;
    netmsg_init(&msg, NULL, &curthread->td_msgport, flags,
        rtmask_add_msghandler);
    msg.lmsg.u.ms_resultp = mask;

    return (netisr_domsg_global(&msg));

_rtmask_lookup(struct sockaddr *mask, boolean_t search)
    struct radix_node *n;

#define clen(s) (*(u_char *)(s))
    n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
        mask->sa_len >= clen(n->rn_key) &&
        bcmp((char *)mask + 1,
            (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
        return (struct sockaddr *)n->rn_key;

rtmask_add_msghandler(netmsg_t msg)
    struct sockaddr *mask = msg->lmsg.u.ms_resultp;

    ASSERT_NETISR_NCPUS(mycpuid);

    if (rtmask_lookup(mask) == NULL) {
        netisr_replymsg(&msg->base, ENOBUFS);

    netisr_forwardmsg(&msg->base, mycpuid + 1);
/* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
struct rtchange_arg {
    struct ifaddr *old_ifa;
    struct ifaddr *new_ifa;

rtchange_ifa(struct rtentry *rt, struct rtchange_arg *ap)
    if (rt->rt_ifa->ifa_rtrequest != NULL)
        rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt);
    IFAFREE(rt->rt_ifa);

    IFAREF(ap->new_ifa);
    rt->rt_ifa = ap->new_ifa;
    rt->rt_ifp = ap->new_ifa->ifa_ifp;
    if (rt->rt_ifa->ifa_rtrequest != NULL)
        rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);
rtchange_callback(struct radix_node *rn, void *xap)
    struct rtchange_arg *ap = xap;
    struct rtentry *rt = (struct rtentry *)rn;

    if (rt->rt_ifa == ap->old_ifa) {
        if (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
            /*
             * We could saw off the branch we are still
             * sitting on if the ifa_rtrequest DEL/ADD
             * were called directly from here.
             */
        rtchange_ifa(rt, ap);
struct netmsg_rtchange {
    struct netmsg_base base;
    struct ifaddr *old_ifa;
    struct ifaddr *new_ifa;

rtchange_dispatch(netmsg_t msg)
    struct netmsg_rtchange *rmsg = (void *)msg;
    struct radix_node_head *rnh;
    struct rtchange_arg arg;

    ASSERT_NETISR_NCPUS(cpu);

    memset(&arg, 0, sizeof(arg));
    arg.old_ifa = rmsg->old_ifa;
    arg.new_ifa = rmsg->new_ifa;

    rnh = rt_tables[cpu][AF_INET];
        KKASSERT(arg.rt == NULL);
        error = rnh->rnh_walktree(rnh, rtchange_callback, &arg);
        if (arg.rt != NULL) {
            rtchange_ifa(rt, &arg);

    netisr_forwardmsg(&rmsg->base, cpu + 1);
rtchange(struct ifaddr *old_ifa, struct ifaddr *new_ifa)
    struct netmsg_rtchange msg;

    /*
     * XXX individual requests are not independently chained,
     * which means that the per-cpu route tables will not be
     * consistent in the middle of the operation.  If routes
     * related to the interface are manipulated while we are
     * doing this the inconsistency could trigger a panic.
     */
    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
    msg.old_ifa = old_ifa;
    msg.new_ifa = new_ifa;

    netisr_domsg_global(&msg.base);

    old_ifa->ifa_flags &= ~IFA_ROUTE;
    new_ifa->ifa_flags |= IFA_ROUTE;