/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3 (Berkeley) 1/9/95
 * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/globaldata.h>
#include <sys/thread.h>

#include <net/if_var.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <net/ip_mroute/ip_mroute.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <netproto/mpls/mpls.h>
static struct rtstatistics rtstatistics_percpu[MAXCPU] __cachealign;
#define rtstat	rtstatistics_percpu[mycpuid]

struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];

static void	rt_maskedcopy(struct sockaddr *, struct sockaddr *,
			      struct sockaddr *);
static void	rtable_init(void);
static void	rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
					  struct rtentry *, void *);

static void	rtredirect_msghandler(netmsg_t msg);
static void	rtrequest1_msghandler(netmsg_t msg);
static void	rtsearch_msghandler(netmsg_t msg);
static void	rtmask_add_msghandler(netmsg_t msg);

static int	rt_setshims(struct rtentry *, struct sockaddr **);

SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");

static int route_debug = 1;
SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
	   &route_debug, 0, "");

u_long route_kmalloc_limit = 0;
TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
/*
 * Initialize the route table(s) for protocol domains and
 * create a helper thread which will be responsible for updating
 * route table entries on each cpu.
 */
void
route_init(void)
{
	int cpu;

	if (route_kmalloc_limit)
		kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);

	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtable_init();	/* call dom_rtattach() on each cpu */
}
static void
rtable_init_oncpu(netmsg_t msg)
{
	struct domain *dom;
	int cpu = mycpuid;

	ASSERT_NETISR_NCPUS(cpu);

	SLIST_FOREACH(dom, &domains, dom_next) {
		if (dom->dom_rtattach) {
			dom->dom_rtattach(
			    (void **)&rt_tables[cpu][dom->dom_family],
			    dom->dom_rtoffset);
		}
	}
	netisr_forwardmsg(&msg->base, cpu + 1);
}
static void
rtable_init(void)
{
	struct netmsg_base msg;

	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
	netisr_domsg_global(&msg);
}
/*
 * Routing statistics.
 */
static int
sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
					sizeof(struct rtstatistics))))
			break;
		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
				       sizeof(struct rtstatistics))))
			break;
	}
	return (error);
}
SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
	0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
/*
 * Packet routing routines.
 */

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * always clone routes.
 */
void
rtalloc(struct route *ro)
{
	rtalloc_ign(ro, 0UL);
}

/*
 * Look up and fill in the "ro_rt" rtentry field in a route structure given
 * an address in the "ro_dst" field.  Always send a report on a miss and
 * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
 * ignored.
 */
void
rtalloc_ign(struct route *ro, u_long ignoreflags)
{
	if (ro->ro_rt != NULL) {
		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
			return;
		rtfree(ro->ro_rt);
		ro->ro_rt = NULL;
	}
	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
}
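/*
 * Illustrative usage sketch (not part of the original file): a typical
 * in-kernel caller zeroes a struct route, fills in ro_dst and then looks
 * the route up, here ignoring protocol cloning.  The destination value is
 * a placeholder for the example only.
 *
 *	struct route ro;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ro.ro_dst;
 *
 *	bzero(&ro, sizeof(ro));
 *	sin->sin_len = sizeof(struct sockaddr_in);
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	rtalloc_ign(&ro, RTF_PRCLONING);
 *	if (ro.ro_rt == NULL)
 *		return (EHOSTUNREACH);
 *	...
 *	RTFREE(ro.ro_rt);
 */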
/*
 * Look up the route that matches the given "dst" address.
 *
 * Route lookup can have the side-effect of creating and returning
 * a cloned route instead when "dst" matches a cloning route and the
 * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
 *
 * Any route returned has its reference count incremented.
 */
struct rtentry *
_rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
	struct rtentry *rt;

	ASSERT_NETISR_NCPUS(mycpuid);

	if (rnh == NULL)
		goto unreach;

	/*
	 * Look up route in the radix tree.
	 */
	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
	if (rt == NULL)
		goto unreach;

	/*
	 * Handle cloning routes.
	 */
	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
		struct rtentry *clonedroute;
		int error;

		clonedroute = rt;	/* copy in/copy out parameter */
		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
				  &clonedroute);	/* clone the route */
		if (error != 0) {	/* cloning failed */
			if (generate_report)
				rt_dstmsg(RTM_MISS, dst, error);
			rt->rt_refcnt++;
			return (rt);	/* return the uncloned route */
		}
		if (generate_report) {
			if (clonedroute->rt_flags & RTF_XRESOLVE)
				rt_dstmsg(RTM_RESOLVE, dst, 0);
			else
				rt_rtmsg(RTM_ADD, clonedroute,
					 clonedroute->rt_ifp, 0);
		}
		return (clonedroute);	/* return cloned route */
	}

	/*
	 * Increment the reference count of the matched route and return.
	 */
	rt->rt_refcnt++;
	return (rt);

unreach:
	rtstat.rts_unreach++;
	if (generate_report)
		rt_dstmsg(RTM_MISS, dst, 0);
	return (NULL);
}
void
rtfree(struct rtentry *rt)
{
	ASSERT_NETISR_NCPUS(rt->rt_cpuid);
	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));

	--rt->rt_refcnt;
	if (rt->rt_refcnt == 0) {
		struct radix_node_head *rnh =
		    rt_tables[mycpuid][rt_key(rt)->sa_family];

		if (rnh->rnh_close)
			rnh->rnh_close((struct radix_node *)rt, rnh);
		if (!(rt->rt_flags & RTF_UP)) {
			/* deallocate route */
			if (rt->rt_ifa != NULL)
				IFAFREE(rt->rt_ifa);
			if (rt->rt_parent != NULL)
				RTFREE(rt->rt_parent);	/* recursive call! */
			Free(rt_key(rt));
			Free(rt);
		}
	}
}
static void
rtfree_async_dispatch(netmsg_t msg)
{
	struct rtentry *rt = msg->lmsg.u.ms_resultp;

	rtfree(rt);
	netisr_replymsg(&msg->base, 0);
}

void
rtfree_async(struct rtentry *rt)
{
	struct netmsg_base *msg;

	if (IN_NETISR_NCPUS(rt->rt_cpuid)) {
		rtfree(rt);
		return;
	}

	KASSERT(rt->rt_refcnt > 0,
	    ("rtfree_async: rt_refcnt %ld", rt->rt_refcnt));

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
	netmsg_init(msg, NULL, &netisr_afree_rport, 0, rtfree_async_dispatch);
	msg->lmsg.u.ms_resultp = rt;

	netisr_sendmsg(msg, rt->rt_cpuid);
}
static int
rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
		 struct sockaddr *netmask, int flags, struct sockaddr *src)
{
	struct rtentry *rt = NULL;
	struct rt_addrinfo rtinfo;
	struct ifaddr *ifa;
	u_long *stat = NULL;
	int error;

	ASSERT_NETISR_NCPUS(mycpuid);

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}

	/*
	 * If the redirect isn't from our current router for this destination,
	 * it's either old or wrong.
	 */
	if (!(flags & RTF_DONE) &&		/* XXX JH */
	    (rt = rtpurelookup(dst)) != NULL &&
	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
		error = EINVAL;
		goto done;
	}

	/*
	 * If it redirects us to ourselves, we have a routing loop,
	 * perhaps as a result of an interface going down recently.
	 */
	if (ifa_ifwithaddr(gateway)) {
		error = EHOSTUNREACH;
		goto done;
	}

	/*
	 * Create a new entry if the lookup failed or if we got back
	 * a wildcard entry for the default route.  This is necessary
	 * for hosts which use routing redirects generated by smart
	 * gateways to dynamically build the routing tables.
	 */
	if (rt == NULL)
		goto create;
	if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
		rtfree(rt);
		goto create;
	}

	/* Ignore redirects for directly connected hosts. */
	if (!(rt->rt_flags & RTF_GATEWAY)) {
		error = EHOSTUNREACH;
		goto done;
	}

	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
		/*
		 * Changing from a network route to a host route.
		 * Create a new host route rather than smashing the
		 * network route.
		 */
create:
		flags |= RTF_GATEWAY | RTF_DYNAMIC;
		bzero(&rtinfo, sizeof(struct rt_addrinfo));
		rtinfo.rti_info[RTAX_DST] = dst;
		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
		rtinfo.rti_info[RTAX_NETMASK] = netmask;
		rtinfo.rti_flags = flags;
		rtinfo.rti_ifa = ifa;
		rt = NULL;	/* copy-in/copy-out parameter */
		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
		if (rt != NULL)
			flags = rt->rt_flags;
		stat = &rtstat.rts_dynamic;
	} else {
		/*
		 * Smash the current notion of the gateway to this destination.
		 * Should check about netmask!!!
		 */
		rt->rt_flags |= RTF_MODIFIED;
		flags |= RTF_MODIFIED;

		/* We only need to report rtmsg on CPU0 */
		rt_setgate(rt, rt_key(rt), gateway,
			   mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
		error = 0;
		stat = &rtstat.rts_newgateway;
	}

done:
	if (rt != NULL)
		rtfree(rt);
out:
	if (error != 0)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;

	return error;
}
struct netmsg_rtredirect {
	struct netmsg_base base;
	struct sockaddr *dst;
	struct sockaddr *gateway;
	struct sockaddr *netmask;
	int		flags;
	struct sockaddr *src;
};
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
	   struct sockaddr *netmask, int flags, struct sockaddr *src)
{
	struct rt_addrinfo rtinfo;
	int error;
	struct netmsg_rtredirect msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, rtredirect_msghandler);
	msg.dst = dst;
	msg.gateway = gateway;
	msg.netmask = netmask;
	msg.flags = flags;
	msg.src = src;
	error = netisr_domsg_global(&msg.base);

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
}
static void
rtredirect_msghandler(netmsg_t msg)
{
	struct netmsg_rtredirect *rmsg = (void *)msg;

	rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
			 rmsg->flags, rmsg->src);
	netisr_forwardmsg(&msg->base, mycpuid + 1);
}
/*
 * Routing table ioctl interface.
 */
int
rtioctl(u_long req, caddr_t data, struct ucred *cred)
{
	/* Multicast goop, grrr... */
	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
}
struct ifaddr *
ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
{
	struct ifaddr *ifa;

	if (!(flags & RTF_GATEWAY)) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a point-to-point link,
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST) {
			ifa = ifa_ifwithdstaddr(dst);
		}
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		struct rtentry *rt;

		rt = rtpurelookup(gateway);
		if (rt == NULL)
			return (NULL);
		rt->rt_refcnt--;
		if ((ifa = rt->rt_ifa) == NULL)
			return (NULL);
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oldifa = ifa;

		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oldifa;
	}
	return (ifa);
}
static int rt_fixdelete (struct radix_node *, void *);
static int rt_fixchange (struct radix_node *, void *);

struct rtfc_arg {
	struct rtentry *rt0;
	struct radix_node_head *rnh;
};
/*
 * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
 */
int
rt_getifa(struct rt_addrinfo *rtinfo)
{
	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
	int flags = rtinfo->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl
	 * when protocol address is ambiguous.
	 */
	if (rtinfo->rti_ifp == NULL) {
		struct sockaddr *ifpaddr;

		ifpaddr = rtinfo->rti_info[RTAX_IFP];
		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
			struct ifaddr *ifa;

			ifa = ifa_ifwithnet(ifpaddr);
			if (ifa != NULL)
				rtinfo->rti_ifp = ifa->ifa_ifp;
		}
	}

	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (rtinfo->rti_ifa == NULL) {
		struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && rtinfo->rti_ifp != NULL)
			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else
			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if (rtinfo->rti_ifa == NULL)
		return (ENETUNREACH);

	if (rtinfo->rti_ifp == NULL)
		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
	return (0);
}
/*
 * Do appropriate manipulations of a routing tree given
 * all the bits of info needed
 */
int
rtrequest(
	int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt)
{
	struct rt_addrinfo rtinfo;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags;
	return rtrequest1(req, &rtinfo, ret_nrt);
}
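/*
 * Illustrative sketch (not part of the original file): adding a host route
 * toward a gateway from within a netisr thread.  The addresses are
 * placeholders for the example only.  Note that rtrequest() only touches
 * the current cpu's table; rtrequest_global()/rtrequest1_global() replicate
 * a change to all netisr cpus.
 *
 *	struct sockaddr_in dst, gw;
 *	struct rtentry *rt = NULL;
 *	int error;
 *
 *	bzero(&dst, sizeof(dst));
 *	dst.sin_len = sizeof(dst);
 *	dst.sin_family = AF_INET;
 *	dst.sin_addr.s_addr = htonl(0x0a000001);	(10.0.0.1)
 *	gw = dst;
 *	gw.sin_addr.s_addr = htonl(0x0a0000fe);		(10.0.0.254)
 *	error = rtrequest(RTM_ADD, (struct sockaddr *)&dst,
 *			  (struct sockaddr *)&gw, NULL,
 *			  RTF_HOST | RTF_GATEWAY, &rt);
 *	if (error == 0 && rt != NULL)
 *		--rt->rt_refcnt;	(drop the reference returned via ret_nrt)
 */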
int
rtrequest_global(
	int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags)
{
	struct rt_addrinfo rtinfo;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags;
	return rtrequest1_global(req, &rtinfo, NULL, NULL, RTREQ_PRIO_NORM);
}
struct netmsg_rtq {
	struct netmsg_base	base;
	int			req;
	struct rt_addrinfo	*rtinfo;
	rtrequest1_callback_func_t callback;
	void			*arg;
};

int
rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
		  rtrequest1_callback_func_t callback, void *arg,
		  boolean_t req_prio)
{
	struct netmsg_rtq msg;
	int flags = 0;

	if (req_prio)
		flags = MSGF_PRIORITY;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
		    rtrequest1_msghandler);
	msg.base.lmsg.ms_error = -1;
	msg.req = req;
	msg.rtinfo = rtinfo;
	msg.callback = callback;
	msg.arg = arg;
	return (netisr_domsg_global(&msg.base));
}
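/*
 * Illustrative sketch (not part of the original file): replicating an add
 * across the per-cpu tables with a completion callback.  The callback name
 * and its kprintf are hypothetical; the signature follows
 * rtrequest1_callback_func_t as used elsewhere in this file.
 *
 *	static void
 *	example_add_callback(int req, int error, struct rt_addrinfo *rtinfo,
 *			     struct rtentry *rt, void *arg)
 *	{
 *		if (error)
 *			kprintf("route add failed on cpu%d: %d\n",
 *			    mycpuid, error);
 *	}
 *
 *	struct rt_addrinfo rtinfo;
 *
 *	bzero(&rtinfo, sizeof(rtinfo));
 *	rtinfo.rti_info[RTAX_DST] = dst;
 *	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
 *	rtinfo.rti_flags = RTF_GATEWAY | RTF_UP;
 *	error = rtrequest1_global(RTM_ADD, &rtinfo, example_add_callback,
 *				  NULL, RTREQ_PRIO_NORM);
 */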
/*
 * Handle a route table request on the current cpu.  Since the route tables
 * are supposed to be identical on each cpu, an error occurring later in the
 * message chain is considered system-fatal.
 */
static void
rtrequest1_msghandler(netmsg_t msg)
{
	struct netmsg_rtq *rmsg = (void *)msg;
	struct rt_addrinfo rtinfo;
	struct rtentry *rt = NULL;
	int error;

	/*
	 * Copy the rtinfo.  We need to make sure that the original
	 * rtinfo, which is setup by the caller, in the netmsg will
	 * _not_ be changed; else the next CPU on the netmsg forwarding
	 * path will see a different rtinfo than what this CPU has seen.
	 */
	rtinfo = *rmsg->rtinfo;

	error = rtrequest1(rmsg->req, &rtinfo, &rt);
	if (rmsg->callback)
		rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);

	/*
	 * RTM_DELETE's are propagated even if an error occurs, since a
	 * cloned route might be undergoing deletion and cloned routes
	 * are not necessarily replicated.  An overall error is returned
	 * only if no cpus have the route in question.
	 */
	if (rmsg->base.lmsg.ms_error < 0 || error == 0)
		rmsg->base.lmsg.ms_error = error;

	if (error && rmsg->req != RTM_DELETE) {
		panic("rtrequest1_msghandler: rtrequest table req %d, "
		      "failed on cpu%d, error %d\n",
		      rmsg->req, mycpuid, error);
		netisr_replymsg(&rmsg->base, error);
	} else {
		netisr_forwardmsg_error(&rmsg->base, mycpuid + 1,
					rmsg->base.lmsg.ms_error);
	}
}
int
rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
{
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct rtentry *rt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
	boolean_t reportmsg;
	int error = 0;

	ASSERT_NETISR_NCPUS(mycpuid);

#define gotoerr(x) { error = x ; goto bad; }

	if (route_debug)
		rt_addrinfo_print(req, rtinfo);

	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
		gotoerr(EAFNOSUPPORT);

	/*
	 * If we are adding a host route then we don't want to put
	 * a netmask in the tree, nor do we want to clone it.
	 */
	if (rtinfo->rti_flags & RTF_HOST) {
		rtinfo->rti_info[RTAX_NETMASK] = NULL;
		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
	}

	switch (req) {
	case RTM_DELETE:
		/* Remove the item from the tree. */
		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
				      (char *)rtinfo->rti_info[RTAX_NETMASK],
				      rnh);
		if (rn == NULL)
			gotoerr(ESRCH);
		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
		rt = (struct rtentry *)rn;

		/* ref to prevent a deletion race */
		++rt->rt_refcnt;

		/* Free any routes cloned from this one. */
		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
		    rt_mask(rt) != NULL) {
			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
					       (char *)rt_mask(rt),
					       rt_fixdelete, rt);
		}

		if (rt->rt_gwroute != NULL) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}

		/*
		 * NB: RTF_UP must be set during the search above,
		 * because we might delete the last ref, causing
		 * rt to get freed prematurely.
		 */
		rt->rt_flags &= ~RTF_UP;

		if (route_debug)
			rt_print(rtinfo, rt);

		/* Give the protocol a chance to keep things in sync. */
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt);

		/*
		 * If the caller wants it, then it can have it,
		 * but it's up to it to free the rtentry as we won't be
		 * doing it.
		 */
		KASSERT(rt->rt_refcnt >= 0,
			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
		if (ret_nrt != NULL) {
			/* leave ref intact for return */
			*ret_nrt = rt;
		} else {
			/* deref / attempt to destroy */
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			gotoerr(EINVAL);

		KASSERT(rt->rt_cpuid == mycpuid,
			("rt resolve rt_cpuid %d, mycpuid %d",
			 rt->rt_cpuid, mycpuid));

		rtinfo->rti_flags =
		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
		rtinfo->rti_flags |= RTF_WASCLONED;
		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
			rtinfo->rti_flags |= RTF_HOST;
		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
		goto makeroute;

	case RTM_ADD:
		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
			("rtrequest: GATEWAY but no gateway"));

		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
			gotoerr(error);
		ifa = rtinfo->rti_ifa;
makeroute:
		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
		if (rt == NULL) {
			if (req == RTM_ADD) {
				kprintf("rtrequest1: alloc rtentry failed on "
					"cpu%d\n", mycpuid);
			}
			gotoerr(ENOBUFS);
		}
		bzero(rt, sizeof(struct rtentry));
		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
		rt->rt_cpuid = mycpuid;

		if (mycpuid != 0 && req == RTM_ADD) {
			/* For RTM_ADD, we have already sent rtmsg on CPU0. */
			reportmsg = RTL_DONTREPORT;
		} else {
			/*
			 * For RTM_ADD, we only send rtmsg on CPU0.
			 * For RTM_RESOLVE, we always send rtmsg. XXX
			 */
			reportmsg = RTL_REPORTMSG;
		}
		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
				   reportmsg);
		if (error != 0) {
			Free(rt);
			gotoerr(error);
		}

		ndst = rt_key(rt);
		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
			rt_maskedcopy(dst, ndst,
				      rtinfo->rti_info[RTAX_NETMASK]);
		else
			bcopy(dst, ndst, dst->sa_len);

		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
			rt_setshims(rt, rtinfo->rti_info);

		/*
		 * Note that we now have a reference to the ifa.
		 * This moved from below so that rnh->rnh_addaddr() can
		 * examine the ifa and ifa->ifa_ifp if it so desires.
		 */
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;
		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */

		rn = rnh->rnh_addaddr((char *)ndst,
				      (char *)rtinfo->rti_info[RTAX_NETMASK],
				      rnh, rt->rt_nodes);
		if (rn == NULL) {
			struct rtentry *oldrt;

			/*
			 * We already have one of these in the tree.
			 * We do a special hack: if the old route was
			 * cloned, then we blow it away and try
			 * re-inserting the new one.
			 */
			oldrt = rtpurelookup(ndst);
			if (oldrt != NULL) {
				--oldrt->rt_refcnt;
				if (oldrt->rt_flags & RTF_WASCLONED) {
					rtrequest(RTM_DELETE, rt_key(oldrt),
						  oldrt->rt_gateway,
						  rt_mask(oldrt),
						  oldrt->rt_flags, NULL);
					rn = rnh->rnh_addaddr((char *)ndst,
					    (char *)
					    rtinfo->rti_info[RTAX_NETMASK],
					    rnh, rt->rt_nodes);
				}
			}
		}
		/* NOTE: rt_ifa may have been changed */
		ifa = rt->rt_ifa;

		/*
		 * If it still failed to go into the tree,
		 * then un-make it (this should be a function).
		 */
		if (rn == NULL) {
			if (rt->rt_gwroute != NULL)
				rtfree(rt->rt_gwroute);
			IFAFREE(ifa);
			Free(rt_key(rt));
			Free(rt);
			gotoerr(EEXIST);
		}

		/*
		 * If we got here from RESOLVE, then we are cloning
		 * so clone the rest, and note that we
		 * are a clone (and increment the parent's references)
		 */
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx;    /* copy metrics */
			rt->rt_rmx.rmx_pksent = 0;  /* reset packet counter */
			if ((*ret_nrt)->rt_flags &
			    (RTF_CLONING | RTF_PRCLONING)) {
				rt->rt_parent = *ret_nrt;
				(*ret_nrt)->rt_refcnt++;
			}
		}

		/*
		 * if this protocol has something to add to this then
		 * allow it to do that as well.
		 */
		if (ifa->ifa_rtrequest != NULL)
			ifa->ifa_rtrequest(req, rt);

		/*
		 * We repeat the same procedure from rt_setgate() here because
		 * it doesn't fire when we call it there because the node
		 * hasn't been added to the tree yet.
		 */
		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
		    rt_mask(rt) != NULL) {
			struct rtfc_arg arg = { rt, rnh };

			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
					       (char *)rt_mask(rt),
					       rt_fixchange, &arg);
		}

		if (route_debug)
			rt_print(rtinfo, rt);

		/*
		 * Return the resulting rtentry,
		 * increasing the number of references by one.
		 */
		if (ret_nrt != NULL) {
			rt->rt_refcnt++;
			*ret_nrt = rt;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}
bad:
	if (route_debug) {
		if (error)
			kprintf("rti %p failed error %d\n", rtinfo, error);
		else
			kprintf("rti %p succeeded\n", rtinfo);
	}
	return (error);
}
/*
 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
 * (i.e., the routes related to it by the operation of cloning).  This
 * routine is iterated over all potential former-child-routes by way of
 * rnh->rnh_walktree_from() above, and those that actually are children of
 * the late parent (passed in as VP here) are themselves deleted.
 */
static int
rt_fixdelete(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rtentry *rt0 = vp;

	if (rt->rt_parent == rt0 &&
	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
				 rt->rt_flags, NULL);
	}
	return 0;
}
/*
 * This routine is called from rt_setgate() to do the analogous thing for
 * adds and changes.  There is the added complication in this case of a
 * middle insert; i.e., insertion of a new network route between an older
 * network route and (cloned) host routes.  For this reason, a simple check
 * of rt->rt_parent is insufficient; each candidate route must be tested
 * against the (mask, value) of the new route (passed as before in vp)
 * to see if the new route matches it.
 *
 * XXX - it may be possible to do fixdelete() for changes and reserve this
 * routine just for adds.  I'm not sure why I thought it was necessary to do
 * changes this way.
 */
static int rtfcdebug = 0;

static int
rt_fixchange(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct rtfc_arg *ap = vp;
	struct rtentry *rt0 = ap->rt0;
	struct radix_node_head *rnh = ap->rnh;
	u_char *xk1, *xm1, *xk2, *xmp;
	int i, len, mlen;

	if (rtfcdebug)
		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);

	if (rt->rt_parent == NULL ||
	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
		return 0;
	}

	if (rt->rt_parent == rt0) {
		if (rtfcdebug) kprintf("parent match\n");
		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
				 rt->rt_flags, NULL);
	}

	/*
	 * There probably is a function somewhere which does this...
	 * if not, there should be.
	 */
	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);

	xk1 = (u_char *)rt_key(rt0);
	xm1 = (u_char *)rt_mask(rt0);
	xk2 = (u_char *)rt_key(rt);

	/* avoid applying a less specific route */
	xmp = (u_char *)rt_mask(rt->rt_parent);
	mlen = rt_key(rt->rt_parent)->sa_len;
	if (mlen > rt_key(rt0)->sa_len) {
		if (rtfcdebug)
			kprintf("rt_fixchange: inserting a less "
				"specific route\n");
		return 0;
	}
	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
			if (rtfcdebug)
				kprintf("rt_fixchange: inserting a less "
					"specific route\n");
			return 0;
		}
	}

	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
		if ((xk2[i] & xm1[i]) != xk1[i]) {
			if (rtfcdebug) kprintf("no match\n");
			return 0;
		}
	}

	/*
	 * OK, this node is a clone, and matches the node currently being
	 * changed/added under the node's mask.  So, get rid of it.
	 */
	if (rtfcdebug) kprintf("deleting\n");
	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
			 rt->rt_flags, NULL);
}
int
rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
	   boolean_t generate_report)
{
	char *space, *oldspace;
	int dlen = RT_ROUNDUP(dst->sa_len), glen = RT_ROUNDUP(gate->sa_len);
	struct rtentry *rt = rt0;
	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];

	ASSERT_NETISR_NCPUS(mycpuid);

	/*
	 * A host route with the destination equal to the gateway
	 * will interfere with keeping LLINFO in the routing
	 * table, so disallow it.
	 */
	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
			      (RTF_HOST | RTF_GATEWAY)) &&
	    dst->sa_len == gate->sa_len &&
	    sa_equal(dst, gate)) {
		/*
		 * The route might already exist if this is an RTM_CHANGE
		 * or a routing redirect, so try to delete it.
		 */
		if (rt_key(rt0) != NULL)
			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
				  rt_mask(rt0), rt0->rt_flags, NULL);
		return EADDRNOTAVAIL;
	}

	/*
	 * Both dst and gateway are stored in the same malloc'ed chunk
	 * (If I ever get my hands on....)
	 * if we need to malloc a new chunk, then keep the old one around
	 * till we don't need it any more.
	 */
	if (rt->rt_gateway == NULL ||
	    glen > RT_ROUNDUP(rt->rt_gateway->sa_len)) {
		oldspace = (char *)rt_key(rt);
		R_Malloc(space, char *, dlen + glen);
		if (space == NULL)
			return ENOBUFS;
		rt->rt_nodes->rn_key = space;
	} else {
		space = (char *)rt_key(rt);	/* Just use the old space. */
		oldspace = NULL;
	}

	/* Set the gateway value. */
	rt->rt_gateway = (struct sockaddr *)(space + dlen);
	bcopy(gate, rt->rt_gateway, glen);

	if (oldspace != NULL) {
		/*
		 * If we allocated a new chunk, preserve the original dst.
		 * This way, rt_setgate() really just sets the gate
		 * and leaves the dst field alone.
		 */
		bcopy(dst, space, dlen);
		Free(oldspace);
	}

	/*
	 * If there is already a gwroute, it's now almost definitely wrong
	 * so drop it.
	 */
	if (rt->rt_gwroute != NULL) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	if (rt->rt_flags & RTF_GATEWAY) {
		/*
		 * Cloning loop avoidance: In the presence of
		 * protocol-cloning and bad configuration, it is
		 * possible to get stuck in bottomless mutual recursion
		 * (rtrequest rt_setgate rtlookup).  We avoid this
		 * by not allowing protocol-cloning to operate for
		 * gateways (which is probably the correct choice
		 * anyway), and avoid the resulting reference loops
		 * by disallowing any route to run through itself as
		 * a gateway.  This is obviously mandatory when we
		 * get rt->rt_output().
		 *
		 * This breaks TTCP for hosts outside the gateway!  XXX JH
		 */
		rt->rt_gwroute = _rtlookup(gate, generate_report,
					   RTF_PRCLONING);
		if (rt->rt_gwroute == rt) {
			rt->rt_gwroute = NULL;
			--rt->rt_refcnt;
			return EDQUOT;	/* failure */
		}
	}

	/*
	 * This isn't going to do anything useful for host routes, so
	 * don't bother.  Also make sure we have a reasonable mask
	 * (we don't yet have one during adds).
	 */
	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
		struct rtfc_arg arg = { rt, rnh };

		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
				       (char *)rt_mask(rt),
				       rt_fixchange, &arg);
	}

	return 0;
}
static void
rt_maskedcopy(
	struct sockaddr *src,
	struct sockaddr *dst,
	struct sockaddr *netmask)
{
	u_char *cp1 = (u_char *)src;
	u_char *cp2 = (u_char *)dst;
	u_char *cp3 = (u_char *)netmask;
	u_char *cplim = cp2 + *cp3;
	u_char *cplim2 = cp2 + *cp1;

	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
	cp3 += 2;
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		bzero(cp2, cplim2 - cp2);
}
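/*
 * Worked example (illustrative, not from the original file): with
 * src = 10.1.2.3 and netmask = 255.255.255.0, rt_maskedcopy() produces
 * dst = 10.1.2.0.  Bytes beyond the netmask's sa_len are zeroed, so the
 * copied key never carries stray host bits into the radix tree.
 */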
int
rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
{
	struct rtentry *up_rt, *rt;

	ASSERT_NETISR_NCPUS(mycpuid);

	if (!(rt0->rt_flags & RTF_UP)) {
		up_rt = rtlookup(dst);
		if (up_rt == NULL)
			return (EHOSTUNREACH);
		up_rt->rt_refcnt--;
	} else
		up_rt = rt0;
	if (up_rt->rt_flags & RTF_GATEWAY) {
		if (up_rt->rt_gwroute == NULL) {
			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
			if (up_rt->rt_gwroute == NULL)
				return (EHOSTUNREACH);
		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
			rtfree(up_rt->rt_gwroute);
			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
			if (up_rt->rt_gwroute == NULL)
				return (EHOSTUNREACH);
		}
		rt = up_rt->rt_gwroute;
	} else
		rt = up_rt;
	if (rt->rt_flags & RTF_REJECT &&
	    (rt->rt_rmx.rmx_expire == 0 ||	   /* rt doesn't expire */
	     time_uptime < rt->rt_rmx.rmx_expire)) /* rt not expired */
		return (rt->rt_flags & RTF_HOST ? EHOSTDOWN : EHOSTUNREACH);
	*drt = rt;
	return 0;
}
static int
rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
		int shimlen;

		if (shim == NULL)
			break;

		shimlen = RT_ROUNDUP(shim->sa_len);
		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
		if (rt->rt_shim[i] == NULL)
			return ENOBUFS;
		bcopy(shim, rt->rt_shim[i], shimlen);
	}

	return 0;
}
/*
 * Print out a route table entry
 */
void
rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
{
	kprintf("rti %p cpu %d route %p flags %08lx: ",
		rtinfo, mycpuid, rn, rn->rt_flags);
	sockaddr_print(rt_key(rn));
	sockaddr_print(rt_mask(rn));
	sockaddr_print(rn->rt_gateway);
	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
	kprintf(" ifa %p\n", rn->rt_ifa);
}
void
rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
{
	int i;

	if (cmd == RTM_DELETE && route_debug > 1)
		print_backtrace(-1);

	kprintf("C%02d ", cmd);
	kprintf("rti %p cpu %d ", rti, mycpuid);
	for (i = 0; i < rti->rti_addrs; ++i) {
		if (rti->rti_info[i] == NULL)
			continue;
		kprintf("(?%02d ", i);
		sockaddr_print(rti->rti_info[i]);
	}
}
void
sockaddr_print(struct sockaddr *sa)
{
	struct sockaddr_in *sa4;
	struct sockaddr_in6 *sa6;
	int len;
	int i;

	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);

	switch(sa->sa_family) {
	case AF_INET:
	case AF_INET6:
	default:
		switch(sa->sa_family) {
		case AF_INET:
			sa4 = (struct sockaddr_in *)sa;
			kprintf("INET %d %d.%d.%d.%d",
				ntohs(sa4->sin_port),
				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
			);
			break;
		case AF_INET6:
			sa6 = (struct sockaddr_in6 *)sa;
			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
				ntohs(sa6->sin6_port),
				sa6->sin6_addr.s6_addr16[0],
				sa6->sin6_addr.s6_addr16[1],
				sa6->sin6_addr.s6_addr16[2],
				sa6->sin6_addr.s6_addr16[3],
				sa6->sin6_addr.s6_addr16[4],
				sa6->sin6_addr.s6_addr16[5],
				sa6->sin6_addr.s6_addr16[6],
				sa6->sin6_addr.s6_addr16[7]
			);
			break;
		default:
			kprintf("AF%d ", sa->sa_family);
			while (len > 0 && sa->sa_data[len-1] == 0)
				--len;

			for (i = 0; i < len; ++i) {
				if (i)
					kprintf(".");
				kprintf("%d", (unsigned char)sa->sa_data[i]);
			}
			break;
		}
	}
}
/*
 * Set up a routing table entry, normally for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct sockaddr *dst, *deldst, *netmask;
	struct mbuf *m = NULL;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct rt_addrinfo rtinfo;
	int error;

	if (flags & RTF_HOST) {
		dst = ifa->ifa_dstaddr;
		netmask = NULL;
	} else {
		dst = ifa->ifa_addr;
		netmask = ifa->ifa_netmask;
	}
	/*
	 * If it's a delete, check that if it exists, it's on the correct
	 * interface or we might scrub a route to another ifa which would
	 * be confusing at best and possibly worse.
	 */
	if (cmd == RTM_DELETE) {
		/*
		 * It's a delete, so it should already exist..
		 * If it's a net, mask off the host bits
		 * (Assuming we have a mask)
		 */
		if (netmask != NULL) {
			m = m_get(M_NOWAIT, MT_SONAME);
			if (m == NULL)
				return (ENOBUFS);
			deldst = mtod(m, struct sockaddr *);
			rt_maskedcopy(dst, deldst, netmask);
			dst = deldst;
		}
		/*
		 * Look up an rtentry that is in the routing tree and
		 * contains the correct info.
		 */
		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
		    (rn = rnh->rnh_lookup((char *)dst,
					  (char *)netmask, rnh)) == NULL ||
		    ((struct rtentry *)rn)->rt_ifa != ifa ||
		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
			if (m != NULL)
				m_free(m);
			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		}
		/* XXX */
#if 0
		else {
			/*
			 * One would think that as we are deleting, and we know
			 * it doesn't exist, we could just return at this point
			 * with an "ELSE" clause, but apparently not..
			 */
			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		}
#endif
	}
	/*
	 * Do the actual request
	 */
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_info[RTAX_DST] = dst;
	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	rtinfo.rti_info[RTAX_NETMASK] = netmask;
	rtinfo.rti_flags = flags | ifa->ifa_flags;
	rtinfo.rti_ifa = ifa;
	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa,
				  RTREQ_PRIO_HIGH);
	if (m != NULL)
		m_free(m);
	return (error);
}
static void
rtinit_rtrequest_callback(int cmd, int error,
			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
			  void *arg)
{
	struct ifaddr *ifa = arg;

	if (error == 0 && rt) {
		if (mycpuid == 0)
			rt_newaddrmsg(cmd, ifa, error, rt);
		if (cmd == RTM_DELETE) {
			if (rt->rt_refcnt == 0) {
				++rt->rt_refcnt;
				rtfree(rt);
			}
		}
	}
}
struct netmsg_rts {
	struct netmsg_base	base;
	int			req;
	struct rt_addrinfo	*rtinfo;
	rtsearch_callback_func_t callback;
	void			*arg;
	boolean_t		exact_match;
	int			found_cnt;
};

int
rtsearch_global(int req, struct rt_addrinfo *rtinfo,
		rtsearch_callback_func_t callback, void *arg,
		boolean_t exact_match, boolean_t req_prio)
{
	struct netmsg_rts msg;
	int flags = 0;

	if (req_prio)
		flags = MSGF_PRIORITY;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
		    rtsearch_msghandler);
	msg.req = req;
	msg.rtinfo = rtinfo;
	msg.callback = callback;
	msg.arg = arg;
	msg.exact_match = exact_match;
	msg.found_cnt = 0;
	return (netisr_domsg_global(&msg.base));
}
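/*
 * Illustrative sketch (not part of the original file): rtsearch_global()
 * runs a callback against each cpu's copy of the matching route.  The
 * callback name below is hypothetical; its signature follows
 * rtsearch_callback_func_t as used by rtsearch_msghandler() below.
 *
 *	static int
 *	example_get_callback(int req, struct rt_addrinfo *rtinfo,
 *			     struct rtentry *rt, void *arg, int found_cnt)
 *	{
 *		(inspect or adjust this cpu's rtentry here)
 *		return 0;	(EJUSTRETURN would stop the walk early)
 *	}
 *
 *	error = rtsearch_global(RTM_GET, &rtinfo, example_get_callback,
 *				NULL, TRUE, RTREQ_PRIO_NORM);
 */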
static void
rtsearch_msghandler(netmsg_t msg)
{
	struct netmsg_rts *rmsg = (void *)msg;
	struct rt_addrinfo rtinfo;
	struct radix_node_head *rnh;
	struct rtentry *rt;
	int error;

	ASSERT_NETISR_NCPUS(mycpuid);

	/*
	 * Copy the rtinfo.  We need to make sure that the original
	 * rtinfo, which is setup by the caller, in the netmsg will
	 * _not_ be changed; else the next CPU on the netmsg forwarding
	 * path will see a different rtinfo than what this CPU has seen.
	 */
	rtinfo = *rmsg->rtinfo;

	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
		if (mycpuid != 0)
			panic("partially initialized routing tables");
		netisr_replymsg(&rmsg->base, EAFNOSUPPORT);
		return;
	}

	/*
	 * Correct rtinfo for the host route searching.
	 */
	if (rtinfo.rti_flags & RTF_HOST) {
		rtinfo.rti_netmask = NULL;
		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
	}

	rt = (struct rtentry *)
	     rnh->rnh_lookup((char *)rtinfo.rti_dst,
			     (char *)rtinfo.rti_netmask, rnh);

	/*
	 * If we are asked to do the "exact match", we need to make sure
	 * that host route searching got a host route while a network
	 * route searching got a network route.
	 */
	if (rt != NULL && rmsg->exact_match &&
	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
		rt = NULL;

	if (rt == NULL) {
		/*
		 * No matching routes have been found, don't count this
		 * as a critical error (here, we set 'error' to 0), just
		 * keep moving on, since at least prcloned routes are not
		 * duplicated onto each CPU.
		 */
		error = 0;
	} else {
		rmsg->found_cnt++;

		error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
				       rmsg->found_cnt);
		if (error == EJUSTRETURN) {
			netisr_replymsg(&rmsg->base, 0);
			return;
		}
	}

	if (error) {
		KKASSERT(rmsg->found_cnt > 0);

		/*
		 * Under following cases, unrecoverable error has
		 * not occurred:
		 * o Request is RTM_GET
		 * o The first time that we find the route, but the
		 *   modification fails.
		 */
		if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
			panic("rtsearch_msghandler: unrecoverable error "
			      "req %d", rmsg->req);
		}
		netisr_replymsg(&rmsg->base, error);
	} else {
		if (rmsg->found_cnt == 0) {
			/* The requested route has not been seen ... */
			error = ESRCH;
		}
		netisr_forwardmsg_error(&rmsg->base, mycpuid + 1, error);
	}
}
int
rtmask_add_global(struct sockaddr *mask, boolean_t req_prio)
{
	struct netmsg_base msg;
	int flags = 0;

	if (req_prio)
		flags = MSGF_PRIORITY;
	netmsg_init(&msg, NULL, &curthread->td_msgport, flags,
		    rtmask_add_msghandler);
	msg.lmsg.u.ms_resultp = mask;

	return (netisr_domsg_global(&msg));
}
struct sockaddr *
_rtmask_lookup(struct sockaddr *mask, boolean_t search)
{
	struct radix_node *n;

#define	clen(s)	(*(u_char *)(s))
	n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
	if (n != NULL &&
	    mask->sa_len >= clen(n->rn_key) &&
	    bcmp((char *)mask + 1,
		 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
		return (struct sockaddr *)n->rn_key;
	} else {
		return NULL;
	}
#undef clen
}
static void
rtmask_add_msghandler(netmsg_t msg)
{
	struct sockaddr *mask = msg->lmsg.u.ms_resultp;

	ASSERT_NETISR_NCPUS(mycpuid);

	if (rtmask_lookup(mask) == NULL) {
		netisr_replymsg(&msg->base, ENOBUFS);
		return;
	}
	netisr_forwardmsg(&msg->base, mycpuid + 1);
}
/* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
struct rtchange_arg {
	struct ifaddr	*old_ifa;
	struct ifaddr	*new_ifa;
	struct rtentry	*rt;
};
static void
rtchange_ifa(struct rtentry *rt, struct rtchange_arg *ap)
{
	if (rt->rt_ifa->ifa_rtrequest != NULL)
		rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt);
	IFAFREE(rt->rt_ifa);

	IFAREF(ap->new_ifa);
	rt->rt_ifa = ap->new_ifa;
	rt->rt_ifp = ap->new_ifa->ifa_ifp;
	if (rt->rt_ifa->ifa_rtrequest != NULL)
		rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);
}
static int
rtchange_callback(struct radix_node *rn, void *xap)
{
	struct rtchange_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;

	if (rt->rt_ifa == ap->old_ifa) {
		if (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
			/*
			 * We could saw the branch off when we are
			 * still sitting on it, if the ifa_rtrequest
			 * DEL/ADD are called directly from here.
			 */
			ap->rt = rt;
			return EJUSTRETURN;
		}
		rtchange_ifa(rt, ap);
	}
	return 0;
}
struct netmsg_rtchange {
	struct netmsg_base base;
	struct ifaddr	*old_ifa;
	struct ifaddr	*new_ifa;
};
static void
rtchange_dispatch(netmsg_t msg)
{
	struct netmsg_rtchange *rmsg = (void *)msg;
	struct radix_node_head *rnh;
	struct rtchange_arg arg;
	int cpu = mycpuid;
	int error;

	ASSERT_NETISR_NCPUS(cpu);

	memset(&arg, 0, sizeof(arg));
	arg.old_ifa = rmsg->old_ifa;
	arg.new_ifa = rmsg->new_ifa;

	rnh = rt_tables[cpu][AF_INET];
	for (;;) {
		KKASSERT(arg.rt == NULL);
		error = rnh->rnh_walktree(rnh, rtchange_callback, &arg);
		if (arg.rt != NULL) {
			struct rtentry *rt;

			rt = arg.rt;
			arg.rt = NULL;
			rtchange_ifa(rt, &arg);
		} else {
			break;
		}
	}

	netisr_forwardmsg(&rmsg->base, cpu + 1);
}
void
rtchange(struct ifaddr *old_ifa, struct ifaddr *new_ifa)
{
	struct netmsg_rtchange msg;

	/*
	 * XXX individual requests are not independently chained,
	 * which means that the per-cpu route tables will not be
	 * consistent in the middle of the operation.  If routes
	 * related to the interface are manipulated while we are
	 * doing this the inconsistency could trigger a panic.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
		    rtchange_dispatch);
	msg.old_ifa = old_ifa;
	msg.new_ifa = new_ifa;
	netisr_domsg_global(&msg.base);

	old_ifa->ifa_flags &= ~IFA_ROUTE;
	new_ifa->ifa_flags |= IFA_ROUTE;
}