4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 1990 Mentat Inc.
27 * This file contains routines that manipulate Internet Routing Entries (IREs).
29 #include <sys/types.h>
30 #include <sys/stream.h>
31 #include <sys/stropts.h>
33 #include <sys/cmn_err.h>
35 #include <sys/systm.h>
36 #include <sys/param.h>
37 #include <sys/socket.h>
39 #include <net/route.h>
40 #include <netinet/in.h>
41 #include <net/if_dl.h>
42 #include <netinet/ip6.h>
43 #include <netinet/icmp6.h>
45 #include <inet/common.h>
49 #include <inet/ip_ndp.h>
50 #include <inet/ip_if.h>
51 #include <inet/ip_ire.h>
52 #include <inet/ipclassifier.h>
54 #include <inet/tunables.h>
/*
 * True if the IRE is an IPv6 default route: either an explicit IRE_DEFAULT
 * entry, or an IRE_INTERFACE route whose address is the unspecified
 * address (i.e. a ::/0 interface route).
 */
58 #define IS_DEFAULT_ROUTE_V6(ire) \
59 (((ire)->ire_type & IRE_DEFAULT) || \
60 (((ire)->ire_type & IRE_INTERFACE) && \
61 (IN6_IS_ADDR_UNSPECIFIED(&(ire)->ire_addr_v6))))
63 static ire_t ire_null
;
66 ire_ftable_lookup_impl_v6(const in6_addr_t
*addr
, const in6_addr_t
*mask
,
67 const in6_addr_t
*gateway
, int type
, const ill_t
*ill
,
68 zoneid_t zoneid
, int flags
, ip_stack_t
*ipst
);
71 * Initialize the ire that is specific to IPv6 part and call
72 * ire_init_common to finish it.
73 * Returns zero or errno.
76 ire_init_v6(ire_t
*ire
, const in6_addr_t
*v6addr
, const in6_addr_t
*v6mask
,
77 const in6_addr_t
*v6gateway
, ushort_t type
, ill_t
*ill
,
78 zoneid_t zoneid
, uint_t flags
, ip_stack_t
*ipst
)
82 BUMP_IRE_STATS(ipst
->ips_ire_stats_v6
, ire_stats_alloced
);
84 ire
->ire_addr_v6
= *v6addr
;
85 if (v6gateway
!= NULL
)
86 ire
->ire_gateway_addr_v6
= *v6gateway
;
88 /* Make sure we don't have stray values in some fields */
94 ire
->ire_mask_v6
= ipv6_all_ones
;
95 ire
->ire_masklen
= IPV6_ABITS
;
100 case IRE_IF_NORESOLVER
:
101 if (v6mask
!= NULL
) {
102 ire
->ire_mask_v6
= *v6mask
;
104 ip_mask_to_plen_v6(&ire
->ire_mask_v6
);
109 ASSERT(v6mask
== NULL
);
116 error
= ire_init_common(ire
, type
, ill
, zoneid
, flags
, IPV6_VERSION
,
121 /* Determine which function pointers to use */
122 ire
->ire_postfragfn
= ip_xmit
; /* Common case */
124 switch (ire
->ire_type
) {
126 ire
->ire_sendfn
= ire_send_local_v6
;
127 ire
->ire_recvfn
= ire_recv_local_v6
;
128 ASSERT(ire
->ire_ill
!= NULL
);
129 if (ire
->ire_ill
->ill_flags
& ILLF_NOACCEPT
)
130 ire
->ire_recvfn
= ire_recv_noaccept_v6
;
133 ire
->ire_sendfn
= ire_send_local_v6
;
134 ire
->ire_recvfn
= ire_recv_loopback_v6
;
137 ire
->ire_postfragfn
= ip_postfrag_loopcheck
;
138 ire
->ire_sendfn
= ire_send_multicast_v6
;
139 ire
->ire_recvfn
= ire_recv_multicast_v6
;
143 * For IRE_IF_ALL and IRE_OFFLINK we forward received
144 * packets by default.
146 ire
->ire_sendfn
= ire_send_wire_v6
;
147 ire
->ire_recvfn
= ire_recv_forward_v6
;
150 if (ire
->ire_flags
& (RTF_REJECT
|RTF_BLACKHOLE
)) {
151 ire
->ire_sendfn
= ire_send_noroute_v6
;
152 ire
->ire_recvfn
= ire_recv_noroute_v6
;
154 ire
->ire_nce_capable
= ire_determine_nce_capable(ire
);
159 * ire_create_v6 is called to allocate and initialize a new IRE.
161 * NOTE : This is called as writer sometimes though not required
166 ire_create_v6(const in6_addr_t
*v6addr
, const in6_addr_t
*v6mask
,
167 const in6_addr_t
*v6gateway
, ushort_t type
, ill_t
*ill
, zoneid_t zoneid
,
168 uint_t flags
, ip_stack_t
*ipst
)
173 ASSERT(!IN6_IS_ADDR_V4MAPPED(v6addr
));
175 ire
= kmem_cache_alloc(ire_cache
, KM_NOSLEEP
);
177 DTRACE_PROBE(kmem__cache__alloc
);
182 error
= ire_init_v6(ire
, v6addr
, v6mask
, v6gateway
,
183 type
, ill
, zoneid
, flags
, ipst
);
186 DTRACE_PROBE2(ire__init__v6
, ire_t
*, ire
, int, error
);
187 kmem_cache_free(ire_cache
, ire
);
194 * Find the ill matching a multicast group.
195 * Allows different routes for multicast addresses
196 * in the unicast routing table (akin to FF::0/8 but could be more specific)
197 * which point at different interfaces. This is used when IPV6_MULTICAST_IF
198 * isn't specified (when sending) and when IPV6_JOIN_GROUP doesn't
199 * specify the interface to join on.
201 * Supports link-local addresses by using ire_route_recursive which follows
202 * the ill when recursing.
204 * This is used in ip_set_destination etc to set ixa_postfragfn for multicast.
205 * We have a setsrcp argument for the same reason.
208 ire_lookup_multi_ill_v6(const in6_addr_t
*group
, zoneid_t zoneid
,
209 ip_stack_t
*ipst
, in6_addr_t
*setsrcp
)
214 ire
= ire_route_recursive_v6(group
, 0, NULL
, zoneid
,
215 MATCH_IRE_DSTONLY
, IRR_NONE
, 0, ipst
, setsrcp
, NULL
);
218 if (ire
->ire_flags
& (RTF_REJECT
|RTF_BLACKHOLE
)) {
223 ill
= ire_nexthop_ill(ire
);
229 * This function takes a mask and returns number of bits set in the
230 * mask (the represented prefix length). Assumes a contiguous mask.
233 ip_mask_to_plen_v6(const in6_addr_t
*v6mask
)
236 int plen
= IPV6_ABITS
;
239 for (i
= 3; i
>= 0; i
--) {
240 if (v6mask
->s6_addr32
[i
] == 0) {
244 bits
= ffs(ntohl(v6mask
->s6_addr32
[i
])) - 1;
254 * Convert a prefix length to the mask for that prefix.
255 * Returns the argument bitmask.
258 ip_plen_to_mask_v6(uint_t plen
, in6_addr_t
*bitmask
)
262 if (plen
< 0 || plen
> IPV6_ABITS
)
264 *bitmask
= ipv6_all_zeros
;
268 ptr
= (uint32_t *)bitmask
;
270 *ptr
++ = 0xffffffffU
;
273 *ptr
= htonl(0xffffffffU
<< (32 - plen
));
278 * Add a fully initialized IPv6 IRE to the forwarding table.
279 * This returns NULL on failure, or a held IRE on success.
280 * Normally the returned IRE is the same as the argument. But a different
281 * IRE will be returned if the added IRE is deemed identical to an existing
282 * one. In that case ire_identical_ref will be increased.
283 * The caller always needs to do an ire_refrele() on the returned IRE.
286 ire_add_v6(ire_t
*ire
)
289 int mask_table_index
;
294 ip_stack_t
*ipst
= ire
->ire_ipst
;
296 ASSERT(ire
->ire_ipversion
== IPV6_VERSION
);
298 /* Make sure the address is properly masked. */
299 V6_MASK_COPY(ire
->ire_addr_v6
, ire
->ire_mask_v6
, ire
->ire_addr_v6
);
301 mask_table_index
= ip_mask_to_plen_v6(&ire
->ire_mask_v6
);
302 if ((ipst
->ips_ip_forwarding_table_v6
[mask_table_index
]) == NULL
) {
306 ptr
= (irb_t
*)mi_zalloc((ipst
->ips_ip6_ftable_hash_size
*
312 for (i
= 0; i
< ipst
->ips_ip6_ftable_hash_size
; i
++) {
313 rw_init(&ptr
[i
].irb_lock
, NULL
, RW_DEFAULT
, NULL
);
314 ptr
[i
].irb_ipst
= ipst
;
316 mutex_enter(&ipst
->ips_ire_ft_init_lock
);
317 if (ipst
->ips_ip_forwarding_table_v6
[mask_table_index
] ==
319 ipst
->ips_ip_forwarding_table_v6
[mask_table_index
] =
321 mutex_exit(&ipst
->ips_ire_ft_init_lock
);
324 * Some other thread won the race in
325 * initializing the forwarding table at the
328 mutex_exit(&ipst
->ips_ire_ft_init_lock
);
329 for (i
= 0; i
< ipst
->ips_ip6_ftable_hash_size
; i
++) {
330 rw_destroy(&ptr
[i
].irb_lock
);
335 irb_ptr
= &(ipst
->ips_ip_forwarding_table_v6
[mask_table_index
][
336 IRE_ADDR_MASK_HASH_V6(ire
->ire_addr_v6
, ire
->ire_mask_v6
,
337 ipst
->ips_ip6_ftable_hash_size
)]);
339 match_flags
= (MATCH_IRE_MASK
| MATCH_IRE_TYPE
| MATCH_IRE_GW
);
340 if (ire
->ire_ill
!= NULL
)
341 match_flags
|= MATCH_IRE_ILL
;
343 * Start the atomic add of the ire. Grab the bucket lock and the
344 * ill lock. Check for condemned.
346 error
= ire_atomic_start(irb_ptr
, ire
);
353 * If we are creating a hidden IRE, make sure we search for
354 * hidden IREs when searching for duplicates below.
355 * Otherwise, we might find an IRE on some other interface
356 * that's not marked hidden.
358 if (ire
->ire_testhidden
)
359 match_flags
|= MATCH_IRE_TESTHIDDEN
;
362 * Atomically check for duplicate and insert in the table.
364 for (ire1
= irb_ptr
->irb_ire
; ire1
!= NULL
; ire1
= ire1
->ire_next
) {
365 if (IRE_IS_CONDEMNED(ire1
))
368 * Here we need an exact match on zoneid, i.e.,
369 * ire_match_args doesn't fit.
371 if (ire1
->ire_zoneid
!= ire
->ire_zoneid
)
374 if (ire1
->ire_type
!= ire
->ire_type
)
378 * Note: We do not allow multiple routes that differ only
379 * in the gateway security attributes; such routes are
380 * considered duplicates.
381 * To change that we explicitly have to treat them as
384 if (ire_match_args_v6(ire1
, &ire
->ire_addr_v6
,
385 &ire
->ire_mask_v6
, &ire
->ire_gateway_addr_v6
,
386 ire
->ire_type
, ire
->ire_ill
, ire
->ire_zoneid
,
389 * Return the old ire after doing a REFHOLD.
390 * As most of the callers continue to use the IRE
391 * after adding, we return a held ire. This will
392 * avoid a lookup in the caller again. If the callers
393 * don't want to use it, they need to do a REFRELE.
395 * We only allow exactly one IRE_IF_CLONE for any dst,
396 * so, if this is an IF_CLONE, return the ire without
397 * an identical_ref, but with an ire_ref held.
399 if (ire
->ire_type
!= IRE_IF_CLONE
) {
400 atomic_add_32(&ire1
->ire_identical_ref
, 1);
401 DTRACE_PROBE2(ire__add__exist
, ire_t
*, ire1
,
404 ip1dbg(("found dup ire existing %p new %p",
405 (void *)ire1
, (void *)ire
));
407 ire_atomic_end(irb_ptr
, ire
);
414 * Normally we do head insertion since most things do not care about
415 * the order of the IREs in the bucket.
416 * However, due to shared-IP zones (and restrict_interzone_loopback)
417 * we can have an IRE_LOCAL as well as IRE_IF_CLONE for the same
418 * address. For that reason we do tail insertion for IRE_IF_CLONE.
420 irep
= (ire_t
**)irb_ptr
;
421 if (ire
->ire_type
& IRE_IF_CLONE
) {
422 while ((ire1
= *irep
) != NULL
)
423 irep
= &ire1
->ire_next
;
425 /* Insert at *irep */
428 ire1
->ire_ptpn
= &ire
->ire_next
;
429 ire
->ire_next
= ire1
;
430 /* Link the new one in. */
431 ire
->ire_ptpn
= irep
;
433 * ire_walk routines de-reference ire_next without holding
434 * a lock. Before we point to the new ire, we want to make
435 * sure the store that sets the ire_next of the new ire
436 * reaches global visibility, so that ire_walk routines
437 * don't see a truncated list of ires i.e if the ire_next
438 * of the new ire gets set after we do "*irep = ire" due
439 * to re-ordering, the ire_walk thread will see a NULL
440 * once it accesses the ire_next of the new ire.
441 * membar_producer() makes sure that the following store
442 * happens *after* all of the above stores.
446 ire
->ire_bucket
= irb_ptr
;
448 * We return a bumped up IRE above. Keep it symmetrical
449 * so that the callers will always have to release. This
450 * helps the callers of this function because they continue
451 * to use the IRE after adding and hence they don't have to
452 * lookup again after we return the IRE.
454 * NOTE : We don't have to use atomics as this is appearing
455 * in the list for the first time and no one else can bump
456 * up the reference count on this yet.
458 ire_refhold_locked(ire
);
459 BUMP_IRE_STATS(ipst
->ips_ire_stats_v6
, ire_stats_inserted
);
460 irb_ptr
->irb_ire_cnt
++;
462 if (ire
->ire_ill
!= NULL
) {
463 DTRACE_PROBE3(ill__incr__cnt
, (ill_t
*), ire
->ire_ill
,
464 (char *), "ire", (void *), ire
);
465 ire
->ire_ill
->ill_ire_cnt
++;
466 ASSERT(ire
->ire_ill
->ill_ire_cnt
!= 0); /* Wraparound */
468 ire_atomic_end(irb_ptr
, ire
);
470 /* Make any caching of the IREs be notified or updated */
471 ire_flush_cache_v6(ire
, IRE_FLUSH_ADD
);
477 * Search for all HOST REDIRECT routes that are
478 * pointing at the specified gateway and
479 * delete them. This routine is called only
480 * when a default gateway is going away.
483 ire_delete_host_redirects_v6(const in6_addr_t
*gateway
, ip_stack_t
*ipst
)
488 in6_addr_t gw_addr_v6
;
491 /* get the hash table for HOST routes */
492 irb_ptr
= ipst
->ips_ip_forwarding_table_v6
[(IP6_MASK_TABLE_SIZE
- 1)];
495 for (i
= 0; (i
< ipst
->ips_ip6_ftable_hash_size
); i
++) {
498 for (ire
= irb
->irb_ire
; ire
!= NULL
; ire
= ire
->ire_next
) {
499 if (!(ire
->ire_flags
& RTF_DYNAMIC
))
501 mutex_enter(&ire
->ire_lock
);
502 gw_addr_v6
= ire
->ire_gateway_addr_v6
;
503 mutex_exit(&ire
->ire_lock
);
504 if (IN6_ARE_ADDR_EQUAL(&gw_addr_v6
, gateway
))
512 * Delete the specified IRE.
513 * All calls should use ire_delete().
514 * Sometimes called as writer though not required by this function.
516 * NOTE : This function is called only if the ire was added
520 ire_delete_v6(ire_t
*ire
)
522 in6_addr_t gw_addr_v6
;
523 ip_stack_t
*ipst
= ire
->ire_ipst
;
526 * Make sure ire_generation increases from ire_flush_cache happen
527 * after any lookup/reader has read ire_generation.
528 * Since the rw_enter makes us wait until any lookup/reader has
529 * completed we can exit the lock immediately.
531 rw_enter(&ipst
->ips_ip6_ire_head_lock
, RW_WRITER
);
532 rw_exit(&ipst
->ips_ip6_ire_head_lock
);
534 ASSERT(ire
->ire_refcnt
>= 1);
535 ASSERT(ire
->ire_ipversion
== IPV6_VERSION
);
537 ire_flush_cache_v6(ire
, IRE_FLUSH_DELETE
);
539 if (ire
->ire_type
== IRE_DEFAULT
) {
541 * when a default gateway is going away
542 * delete all the host redirects pointing at that
545 mutex_enter(&ire
->ire_lock
);
546 gw_addr_v6
= ire
->ire_gateway_addr_v6
;
547 mutex_exit(&ire
->ire_lock
);
548 ire_delete_host_redirects_v6(&gw_addr_v6
, ipst
);
552 * If we are deleting an IRE_INTERFACE then we make sure we also
553 * delete any IRE_IF_CLONE that has been created from it.
554 * Those are always in ire_dep_children.
556 if ((ire
->ire_type
& IRE_INTERFACE
) && ire
->ire_dep_children
!= 0)
557 ire_dep_delete_if_clone(ire
);
559 /* Remove from parent dependencies and child */
560 rw_enter(&ipst
->ips_ire_dep_lock
, RW_WRITER
);
561 if (ire
->ire_dep_parent
!= NULL
) {
564 while (ire
->ire_dep_children
!= NULL
)
565 ire_dep_remove(ire
->ire_dep_children
);
566 rw_exit(&ipst
->ips_ire_dep_lock
);
570 * When an IRE is added or deleted this routine is called to make sure
571 * any caching of IRE information is notified or updated.
573 * The flag argument indicates if the flush request is due to addition
574 * of new route (IRE_FLUSH_ADD), deletion of old route (IRE_FLUSH_DELETE),
575 * or a change to ire_gateway_addr (IRE_FLUSH_GWCHANGE).
578 ire_flush_cache_v6(ire_t
*ire
, int flag
)
580 ip_stack_t
*ipst
= ire
->ire_ipst
;
583 * IRE_IF_CLONE ire's don't provide any new information
584 * than the parent from which they are cloned, so don't
585 * perturb the generation numbers.
587 if (ire
->ire_type
& IRE_IF_CLONE
)
591 * Ensure that an ire_add during a lookup serializes the updates of
592 * the generation numbers under ire_head_lock so that the lookup gets
593 * either the old ire and old generation number, or a new ire and new
596 rw_enter(&ipst
->ips_ip6_ire_head_lock
, RW_WRITER
);
599 * If a route was just added, we need to notify everybody that
600 * has cached an IRE_NOROUTE since there might now be a better
603 if (flag
== IRE_FLUSH_ADD
) {
604 ire_increment_generation(ipst
->ips_ire_reject_v6
);
605 ire_increment_generation(ipst
->ips_ire_blackhole_v6
);
608 /* Adding a default can't otherwise provide a better route */
609 if (ire
->ire_type
== IRE_DEFAULT
&& flag
== IRE_FLUSH_ADD
) {
610 rw_exit(&ipst
->ips_ip6_ire_head_lock
);
615 case IRE_FLUSH_DELETE
:
616 case IRE_FLUSH_GWCHANGE
:
618 * Update ire_generation for all ire_dep_children chains
619 * starting with this IRE
621 ire_dep_incr_generation(ire
);
623 case IRE_FLUSH_ADD
: {
626 ip_stack_t
*ipst
= ire
->ire_ipst
;
630 * Find an IRE which is a shorter match than the ire to be added
631 * For any such IRE (which we repeat) we update the
632 * ire_generation the same way as in the delete case.
634 addr
= ire
->ire_addr_v6
;
635 mask
= ire
->ire_mask_v6
;
636 masklen
= ip_mask_to_plen_v6(&mask
);
638 ire
= ire_ftable_lookup_impl_v6(&addr
, &mask
, NULL
, 0, NULL
,
639 ALL_ZONES
, MATCH_IRE_SHORTERMASK
, ipst
);
640 while (ire
!= NULL
) {
641 /* We need to handle all in the same bucket */
642 irb_increment_generation(ire
->ire_bucket
);
644 mask
= ire
->ire_mask_v6
;
645 ASSERT(masklen
> ip_mask_to_plen_v6(&mask
));
646 masklen
= ip_mask_to_plen_v6(&mask
);
648 ire
= ire_ftable_lookup_impl_v6(&addr
, &mask
, NULL
, 0,
649 NULL
, ALL_ZONES
, MATCH_IRE_SHORTERMASK
, ipst
);
654 rw_exit(&ipst
->ips_ip6_ire_head_lock
);
658 * Matches the arguments passed with the values in the ire.
660 * Note: for match types that match using "ill" passed in, ill
661 * must be checked for non-NULL before calling this routine.
664 ire_match_args_v6(ire_t
*ire
, const in6_addr_t
*addr
, const in6_addr_t
*mask
,
665 const in6_addr_t
*gateway
, int type
, const ill_t
*ill
, zoneid_t zoneid
,
668 in6_addr_t gw_addr_v6
;
669 ill_t
*ire_ill
= NULL
, *dst_ill
;
670 ip_stack_t
*ipst
= ire
->ire_ipst
;
672 ASSERT(ire
->ire_ipversion
== IPV6_VERSION
);
673 ASSERT(addr
!= NULL
);
674 ASSERT(mask
!= NULL
);
675 ASSERT((!(match_flags
& MATCH_IRE_GW
)) || gateway
!= NULL
);
676 ASSERT((!(match_flags
& (MATCH_IRE_ILL
|MATCH_IRE_SRC_ILL
))) ||
677 (ill
!= NULL
&& ill
->ill_isv6
));
680 * If MATCH_IRE_TESTHIDDEN is set, then only return the IRE if it
681 * is in fact hidden, to ensure the caller gets the right one.
683 if (ire
->ire_testhidden
) {
684 if (!(match_flags
& MATCH_IRE_TESTHIDDEN
))
688 if (zoneid
!= ALL_ZONES
&& zoneid
!= ire
->ire_zoneid
&&
689 ire
->ire_zoneid
!= ALL_ZONES
) {
691 * If MATCH_IRE_ZONEONLY has been set and the supplied zoneid
692 * does not match that of ire_zoneid, a failure to
693 * match is reported at this point. Otherwise, since some IREs
694 * that are available in the global zone can be used in local
695 * zones, additional checks need to be performed:
698 * entries should never be matched in this situation.
699 * Each zone has its own IRE_LOOPBACK.
702 * We allow them for any zoneid. ire_route_recursive
703 * does additional checks when
704 * ip_restrict_interzone_loopback is set.
706 * If ill_usesrc_ifindex is set
707 * Then we check if the zone has a valid source address
710 * If ire_ill is set, then check that the zone has an ipif
713 * Outside of this function (in ire_round_robin) we check
714 * that any IRE_OFFLINK has a gateway that reachable from the
715 * zone when we have multiple choices (ECMP).
717 if (match_flags
& MATCH_IRE_ZONEONLY
)
719 if (ire
->ire_type
& IRE_LOOPBACK
)
722 if (ire
->ire_type
& IRE_LOCAL
)
726 * The normal case of IRE_ONLINK has a matching zoneid.
727 * Here we handle the case when shared-IP zones have been
728 * configured with IP addresses on vniN. In that case it
729 * is ok for traffic from a zone to use IRE_ONLINK routes
730 * if the ill has a usesrc pointing at vniN
731 * Applies to IRE_INTERFACE.
733 dst_ill
= ire
->ire_ill
;
734 if (ire
->ire_type
& IRE_ONLINK
) {
738 * Note there is no IRE_INTERFACE on vniN thus
739 * can't do an IRE lookup for a matching route.
741 ifindex
= dst_ill
->ill_usesrc_ifindex
;
746 * If there is a usable source address in the
747 * zone, then it's ok to return this IRE_INTERFACE
749 if (!ipif_zone_avail(ifindex
, dst_ill
->ill_isv6
,
751 ip3dbg(("ire_match_args: no usrsrc for zone"
752 " dst_ill %p\n", (void *)dst_ill
));
758 * route add 11.0.0.0 gw1 -ifp bge0
759 * route add 11.0.0.0 gw2 -ifp bge1
760 * this code would differentiate based on
761 * where the sending zone has addresses.
762 * Only if the zone has an address on bge0 can it use the first
763 * route. It isn't clear if this behavior is documented
766 if (dst_ill
!= NULL
&& (ire
->ire_type
& IRE_OFFLINK
)) {
769 mutex_enter(&dst_ill
->ill_lock
);
770 for (tipif
= dst_ill
->ill_ipif
;
771 tipif
!= NULL
; tipif
= tipif
->ipif_next
) {
772 if (!IPIF_IS_CONDEMNED(tipif
) &&
773 (tipif
->ipif_flags
& IPIF_UP
) &&
774 (tipif
->ipif_zoneid
== zoneid
||
775 tipif
->ipif_zoneid
== ALL_ZONES
))
778 mutex_exit(&dst_ill
->ill_lock
);
785 ire_ill
= ire
->ire_ill
;
786 if (match_flags
& MATCH_IRE_GW
) {
787 mutex_enter(&ire
->ire_lock
);
788 gw_addr_v6
= ire
->ire_gateway_addr_v6
;
789 mutex_exit(&ire
->ire_lock
);
791 if (match_flags
& MATCH_IRE_ILL
) {
794 * If asked to match an ill, we *must* match
795 * on the ire_ill for ipmp test addresses, or
796 * any of the ill in the group for data addresses.
797 * If we don't, we may as well fail.
798 * However, we need an exception for IRE_LOCALs to ensure
799 * we loopback packets even sent to test addresses on different
800 * interfaces in the group.
802 if ((match_flags
& MATCH_IRE_TESTHIDDEN
) &&
803 !(ire
->ire_type
& IRE_LOCAL
)) {
804 if (ire
->ire_ill
!= ill
)
807 match_flags
&= ~MATCH_IRE_TESTHIDDEN
;
809 * We know that ill is not NULL, but ire_ill could be
812 if (ire_ill
== NULL
|| !IS_ON_SAME_LAN(ill
, ire_ill
))
816 if (match_flags
& MATCH_IRE_SRC_ILL
) {
819 if (!IS_ON_SAME_LAN(ill
, ire_ill
)) {
820 if (ire_ill
->ill_usesrc_ifindex
== 0 ||
821 (ire_ill
->ill_usesrc_ifindex
!=
822 ill
->ill_phyint
->phyint_ifindex
))
827 /* No ire_addr_v6 bits set past the mask */
828 ASSERT(V6_MASK_EQ(ire
->ire_addr_v6
, ire
->ire_mask_v6
,
830 if (V6_MASK_EQ(*addr
, *mask
, ire
->ire_addr_v6
) &&
831 ((!(match_flags
& MATCH_IRE_GW
)) ||
832 ((!(match_flags
& MATCH_IRE_DIRECT
)) ||
833 !(ire
->ire_flags
& RTF_INDIRECT
)) &&
834 IN6_ARE_ADDR_EQUAL(&gw_addr_v6
, gateway
)) &&
835 ((!(match_flags
& MATCH_IRE_TYPE
)) || (ire
->ire_type
& type
)) &&
836 ((!(match_flags
& MATCH_IRE_TESTHIDDEN
)) || ire
->ire_testhidden
) &&
837 ((!(match_flags
& MATCH_IRE_MASK
)) ||
838 (IN6_ARE_ADDR_EQUAL(&ire
->ire_mask_v6
, mask
)))) {
839 /* We found the matched IRE */
846 * Check if the zoneid (not ALL_ZONES) has an IRE_INTERFACE for the specified
847 * gateway address. If ill is non-NULL we also match on it.
848 * The caller must hold a read lock on RADIX_NODE_HEAD if lock_held is set.
851 ire_gateway_ok_zone_v6(const in6_addr_t
*gateway
, zoneid_t zoneid
, ill_t
*ill
,
852 ip_stack_t
*ipst
, boolean_t lock_held
)
858 ASSERT(RW_READ_HELD(&ipst
->ips_ip6_ire_head_lock
));
860 rw_enter(&ipst
->ips_ip6_ire_head_lock
, RW_READER
);
862 match_flags
= MATCH_IRE_TYPE
;
864 match_flags
|= MATCH_IRE_ILL
;
866 ire
= ire_ftable_lookup_impl_v6(gateway
, &ipv6_all_zeros
,
867 &ipv6_all_zeros
, IRE_INTERFACE
, ill
, zoneid
, match_flags
,
871 rw_exit(&ipst
->ips_ip6_ire_head_lock
);
881 * Lookup a route in forwarding table.
882 * specific lookup is indicated by passing the
883 * required parameters and indicating the
884 * match required in flag field.
886 * Supports link-local addresses by following the ipif/ill when recursing.
889 ire_ftable_lookup_v6(const in6_addr_t
*addr
, const in6_addr_t
*mask
,
890 const in6_addr_t
*gateway
, int type
, const ill_t
*ill
,
891 zoneid_t zoneid
, int flags
, uint32_t xmit_hint
, ip_stack_t
*ipst
,
896 ASSERT(addr
!= NULL
);
897 ASSERT((!(flags
& MATCH_IRE_MASK
)) || mask
!= NULL
);
898 ASSERT((!(flags
& MATCH_IRE_GW
)) || gateway
!= NULL
);
899 ASSERT(ill
== NULL
|| ill
->ill_isv6
);
901 ASSERT(!IN6_IS_ADDR_V4MAPPED(addr
));
904 * ire_match_args_v6() will dereference ill if MATCH_IRE_ILL
905 * or MATCH_IRE_SRC_ILL is set.
907 if ((flags
& (MATCH_IRE_ILL
|MATCH_IRE_SRC_ILL
)) && (ill
== NULL
))
910 rw_enter(&ipst
->ips_ip6_ire_head_lock
, RW_READER
);
911 ire
= ire_ftable_lookup_impl_v6(addr
, mask
, gateway
, type
, ill
, zoneid
,
914 rw_exit(&ipst
->ips_ip6_ire_head_lock
);
919 * round-robin only if we have more than one route in the bucket.
920 * ips_ip_ecmp_behavior controls when we do ECMP
922 * 1: for IRE_DEFAULT and /0 IRE_INTERFACE
925 * Note: if we found an IRE_IF_CLONE we won't look at the bucket with
926 * other ECMP IRE_INTERFACEs since the IRE_IF_CLONE is a /128 match
927 * and the IRE_INTERFACEs are likely to be shorter matches.
929 if (ire
->ire_bucket
->irb_ire_cnt
> 1 && !(flags
& MATCH_IRE_GW
)) {
930 if (ipst
->ips_ip_ecmp_behavior
== 2 ||
931 (ipst
->ips_ip_ecmp_behavior
== 1 &&
932 IS_DEFAULT_ROUTE_V6(ire
))) {
934 ire_ftable_args_t margs
;
936 bzero(&margs
, sizeof (margs
));
937 margs
.ift_addr_v6
= *addr
;
939 margs
.ift_mask_v6
= *mask
;
941 margs
.ift_gateway_v6
= *gateway
;
942 margs
.ift_type
= type
;
944 margs
.ift_zoneid
= zoneid
;
945 margs
.ift_flags
= flags
;
947 next_ire
= ire_round_robin(ire
->ire_bucket
, &margs
,
948 xmit_hint
, ire
, ipst
);
949 if (next_ire
== NULL
) {
950 /* keep ire if next_ire is null */
959 /* Return generation before dropping lock */
960 if (generationp
!= NULL
)
961 *generationp
= ire
->ire_generation
;
963 rw_exit(&ipst
->ips_ip6_ire_head_lock
);
966 * For shared-IP zones we need additional checks to what was
967 * done in ire_match_args to make sure IRE_LOCALs are handled.
969 * When ip_restrict_interzone_loopback is set, then
970 * we ensure that IRE_LOCAL are only used for loopback
971 * between zones when the logical "Ethernet" would
972 * have looped them back. That is, if in the absence of
973 * the IRE_LOCAL we would have sent to packet out the
976 if ((ire
->ire_type
& IRE_LOCAL
) && zoneid
!= ALL_ZONES
&&
977 ire
->ire_zoneid
!= zoneid
&& ire
->ire_zoneid
!= ALL_ZONES
&&
978 ipst
->ips_ip_restrict_interzone_loopback
) {
979 ire
= ire_alt_local(ire
, zoneid
, ill
, generationp
);
987 * Look up a single ire. The caller holds either the read or write lock.
990 ire_ftable_lookup_impl_v6(const in6_addr_t
*addr
, const in6_addr_t
*mask
,
991 const in6_addr_t
*gateway
, int type
, const ill_t
*ill
,
992 zoneid_t zoneid
, int flags
, ip_stack_t
*ipst
)
998 ASSERT(RW_LOCK_HELD(&ipst
->ips_ip6_ire_head_lock
));
1001 * If the mask is known, the lookup
1002 * is simple, if the mask is not known
1003 * we need to search.
1005 if (flags
& MATCH_IRE_MASK
) {
1008 masklen
= ip_mask_to_plen_v6(mask
);
1009 if (ipst
->ips_ip_forwarding_table_v6
[masklen
] == NULL
) {
1012 irb_ptr
= &(ipst
->ips_ip_forwarding_table_v6
[masklen
][
1013 IRE_ADDR_MASK_HASH_V6(*addr
, *mask
,
1014 ipst
->ips_ip6_ftable_hash_size
)]);
1015 rw_enter(&irb_ptr
->irb_lock
, RW_READER
);
1016 for (ire
= irb_ptr
->irb_ire
; ire
!= NULL
;
1017 ire
= ire
->ire_next
) {
1018 if (IRE_IS_CONDEMNED(ire
))
1020 if (ire_match_args_v6(ire
, addr
, mask
, gateway
, type
,
1021 ill
, zoneid
, flags
))
1024 rw_exit(&irb_ptr
->irb_lock
);
1029 * In this case we don't know the mask, we need to
1030 * search the table assuming different mask sizes.
1032 if (flags
& MATCH_IRE_SHORTERMASK
) {
1033 masklen
= ip_mask_to_plen_v6(mask
);
1035 /* Nothing shorter than zero */
1040 masklen
= IP6_MASK_TABLE_SIZE
- 1;
1043 for (i
= masklen
; i
>= 0; i
--) {
1046 if ((ipst
->ips_ip_forwarding_table_v6
[i
]) == NULL
)
1048 (void) ip_plen_to_mask_v6(i
, &tmpmask
);
1049 irb_ptr
= &ipst
->ips_ip_forwarding_table_v6
[i
][
1050 IRE_ADDR_MASK_HASH_V6(*addr
, tmpmask
,
1051 ipst
->ips_ip6_ftable_hash_size
)];
1052 rw_enter(&irb_ptr
->irb_lock
, RW_READER
);
1053 for (ire
= irb_ptr
->irb_ire
; ire
!= NULL
;
1054 ire
= ire
->ire_next
) {
1055 if (IRE_IS_CONDEMNED(ire
))
1057 if (ire_match_args_v6(ire
, addr
,
1058 &ire
->ire_mask_v6
, gateway
, type
, ill
,
1062 rw_exit(&irb_ptr
->irb_lock
);
1065 ASSERT(ire
== NULL
);
1066 ip1dbg(("ire_ftable_lookup_v6: returning NULL ire"));
1071 rw_exit(&irb_ptr
->irb_lock
);
1077 * This function is called by
1078 * ip_input/ire_route_recursive when doing a route lookup on only the
1079 * destination address.
1081 * The optimizations of this function over ire_ftable_lookup are:
1082 * o removing unnecessary flag matching
1083 * o doing longest prefix match instead of overloading it further
1084 * with the unnecessary "best_prefix_match"
1086 * If no route is found we return IRE_NOROUTE.
1089 ire_ftable_lookup_simple_v6(const in6_addr_t
*addr
, uint32_t xmit_hint
,
1090 ip_stack_t
*ipst
, uint_t
*generationp
)
1094 ire
= ire_ftable_lookup_v6(addr
, NULL
, NULL
, 0, NULL
, ALL_ZONES
,
1095 MATCH_IRE_DSTONLY
, xmit_hint
, ipst
, generationp
);
1097 ire
= ire_reject(ipst
, B_TRUE
);
1098 if (generationp
!= NULL
)
1099 *generationp
= IRE_GENERATION_VERIFY
;
1101 /* ftable_lookup did round robin */
1106 ip_select_route_v6(const in6_addr_t
*dst
, const in6_addr_t src
,
1107 ip_xmit_attr_t
*ixa
, uint_t
*generationp
, in6_addr_t
*setsrcp
,
1110 ASSERT(!(ixa
->ixa_flags
& IXAF_IS_IPV4
));
1112 return (ip_select_route(dst
, src
, ixa
, generationp
, setsrcp
, errorp
));
1116 * Recursively look for a route to the destination. Can also match on
1117 * the zoneid and ill. Used for the data paths. See also
1118 * ire_route_recursive_dstonly.
1120 * If IRR_ALLOCATE is not set then we will only inspect the existing IREs; never
1121 * create an IRE_IF_CLONE. This is used on the receive side when we are not
1123 * If IRR_INCOMPLETE is set then we return the IRE even if we can't correctly
1124 * resolve the gateway.
1126 * Note that this function never returns NULL. It returns an IRE_NOROUTE
1129 * If we find any IRE_LOCAL|BROADCAST etc past the first iteration it
1131 * Allow at most one RTF_INDIRECT.
1134 ire_route_recursive_impl_v6(ire_t
*ire
,
1135 const in6_addr_t
*nexthop
, uint_t ire_type
, const ill_t
*ill_arg
,
1136 zoneid_t zoneid
, uint_t match_args
,
1137 uint_t irr_flags
, uint32_t xmit_hint
, ip_stack_t
*ipst
,
1138 in6_addr_t
*setsrcp
, uint_t
*generationp
)
1141 in6_addr_t v6nexthop
= *nexthop
;
1142 ire_t
*ires
[MAX_IRE_RECURSION
];
1144 uint_t generations
[MAX_IRE_RECURSION
];
1145 boolean_t need_refrele
= B_FALSE
;
1146 boolean_t invalidate
= B_FALSE
;
1148 uint_t maskoff
= (IRE_LOCAL
|IRE_LOOPBACK
);
1150 if (setsrcp
!= NULL
)
1151 ASSERT(IN6_IS_ADDR_UNSPECIFIED(setsrcp
));
1154 * We iterate up to three times to resolve a route, even though
1155 * we have four slots in the array. The extra slot is for an
1156 * IRE_IF_CLONE we might need to create.
1159 while (i
< MAX_IRE_RECURSION
- 1) {
1160 /* ire_ftable_lookup handles round-robin/ECMP */
1162 ire
= ire_ftable_lookup_v6(&v6nexthop
, 0, 0, ire_type
,
1163 (ill
!= NULL
? ill
: ill_arg
), zoneid
, match_args
,
1164 xmit_hint
, ipst
, &generation
);
1166 /* Caller passed it; extra hold since we will rele */
1168 if (generationp
!= NULL
)
1169 generation
= *generationp
;
1171 generation
= IRE_GENERATION_VERIFY
;
1175 if (i
> 0 && (irr_flags
& IRR_INCOMPLETE
)) {
1179 ire
= ire_reject(ipst
, B_TRUE
);
1184 /* Need to return the ire with RTF_REJECT|BLACKHOLE */
1185 if (ire
->ire_flags
& (RTF_REJECT
|RTF_BLACKHOLE
))
1188 ASSERT(!(ire
->ire_type
& IRE_MULTICAST
)); /* Not in ftable */
1190 * Verify that the IRE_IF_CLONE has a consistent generation
1193 if ((ire
->ire_type
& IRE_IF_CLONE
) && !ire_clone_verify(ire
)) {
1200 * Don't allow anything unusual past the first iteration.
1201 * After the first lookup, we should no longer look for
1202 * (IRE_LOCAL|IRE_LOOPBACK) or RTF_INDIRECT routes.
1204 * In addition, after we have found a direct IRE_OFFLINK,
1205 * we should only look for interface or clone routes.
1207 match_args
|= MATCH_IRE_DIRECT
; /* no more RTF_INDIRECTs */
1208 if ((ire
->ire_type
& IRE_OFFLINK
) &&
1209 !(ire
->ire_flags
& RTF_INDIRECT
)) {
1210 ire_type
= IRE_IF_ALL
;
1212 if (!(match_args
& MATCH_IRE_TYPE
))
1213 ire_type
= (IRE_OFFLINK
|IRE_ONLINK
);
1214 ire_type
&= ~maskoff
; /* no more LOCAL, LOOPBACK */
1216 match_args
|= MATCH_IRE_TYPE
;
1217 /* We have a usable IRE */
1219 generations
[i
] = generation
;
1222 /* The first RTF_SETSRC address is passed back if setsrcp */
1223 if ((ire
->ire_flags
& RTF_SETSRC
) &&
1224 setsrcp
!= NULL
&& IN6_IS_ADDR_UNSPECIFIED(setsrcp
)) {
1225 ASSERT(!IN6_IS_ADDR_UNSPECIFIED(
1226 &ire
->ire_setsrc_addr_v6
));
1227 *setsrcp
= ire
->ire_setsrc_addr_v6
;
1231 * Check if we have a short-cut pointer to an IRE for this
1232 * destination, and that the cached dependency isn't stale.
1233 * In that case we've rejoined an existing tree towards a
1234 * parent, thus we don't need to continue the loop to
1235 * discover the rest of the tree.
1237 mutex_enter(&ire
->ire_lock
);
1238 if (ire
->ire_dep_parent
!= NULL
&&
1239 ire
->ire_dep_parent
->ire_generation
==
1240 ire
->ire_dep_parent_generation
) {
1241 mutex_exit(&ire
->ire_lock
);
1245 mutex_exit(&ire
->ire_lock
);
1248 * If this type should have an ire_nce_cache (even if it
1249 * doesn't yet have one) then we are done. Includes
1250 * IRE_INTERFACE with a full 128 bit mask.
1252 if (ire
->ire_nce_capable
) {
1256 ASSERT(!(ire
->ire_type
& IRE_IF_CLONE
));
1258 * For an IRE_INTERFACE we create an IRE_IF_CLONE for this
1259 * particular destination
1261 if (ire
->ire_type
& IRE_INTERFACE
) {
1264 ASSERT(ire
->ire_masklen
!= IPV6_ABITS
);
1267 * In the case of ip_input and ILLF_FORWARDING not
1268 * being set, and in the case of RTM_GET, there is
1269 * no point in allocating an IRE_IF_CLONE. We return
1270 * the IRE_INTERFACE. Note that !IRR_ALLOCATE can
1271 * result in a ire_dep_parent which is IRE_IF_*
1272 * without an IRE_IF_CLONE.
1273 * We recover from that when we need to send packets
1274 * by ensuring that the generations become
1275 * IRE_GENERATION_VERIFY in this case.
1277 if (!(irr_flags
& IRR_ALLOCATE
)) {
1278 invalidate
= B_TRUE
;
1283 clone
= ire_create_if_clone(ire
, &v6nexthop
,
1285 if (clone
== NULL
) {
1287 * Temporary failure - no memory.
1288 * Don't want caller to cache IRE_NOROUTE.
1290 invalidate
= B_TRUE
;
1291 ire
= ire_blackhole(ipst
, B_TRUE
);
1295 * Make clone next to last entry and the
1296 * IRE_INTERFACE the last in the dependency
1297 * chain since the clone depends on the
1301 ASSERT(i
< MAX_IRE_RECURSION
);
1303 ires
[i
] = ires
[i
-1];
1304 generations
[i
] = generations
[i
-1];
1306 generations
[i
-1] = generation
;
1314 * We only match on the type and optionally ILL when
1315 * recursing. The type match is used by some callers
1316 * to exclude certain types (such as IRE_IF_CLONE or
1317 * IRE_LOCAL|IRE_LOOPBACK).
1319 * In the MATCH_IRE_SRC_ILL case, ill_arg may be the 'srcof'
1320 * ire->ire_ill, and we want to find the IRE_INTERFACE for
1321 * ire_ill, so we set ill to the ire_ill
1323 match_args
&= (MATCH_IRE_TYPE
| MATCH_IRE_DIRECT
);
1324 v6nexthop
= ire
->ire_gateway_addr_v6
;
1325 if (ill
== NULL
&& ire
->ire_ill
!= NULL
) {
1327 need_refrele
= B_TRUE
;
1329 match_args
|= MATCH_IRE_ILL
;
1333 ASSERT(ire
== NULL
);
1334 ire
= ire_reject(ipst
, B_TRUE
);
1337 ASSERT(ire
!= NULL
);
1342 /* cleanup ires[i] */
1343 ire_dep_unbuild(ires
, i
);
1344 for (j
= 0; j
< i
; j
++)
1345 ire_refrele(ires
[j
]);
1347 ASSERT((ire
->ire_flags
& (RTF_REJECT
|RTF_BLACKHOLE
)) ||
1348 (irr_flags
& IRR_INCOMPLETE
));
1350 * Use IRE_GENERATION_VERIFY to ensure that ip_output will redo the
1351 * ip_select_route since the reject or lack of memory might be gone.
1353 if (generationp
!= NULL
)
1354 *generationp
= IRE_GENERATION_VERIFY
;
1358 ASSERT(ire
== NULL
);
1362 /* Build dependencies */
1363 if (i
> 1 && !ire_dep_build(ires
, generations
, i
)) {
1364 /* Something in chain was condemned; tear it apart */
1365 ire
= ire_blackhole(ipst
, B_TRUE
);
1370 * Release all refholds except the one for ires[0] that we
1371 * will return to the caller.
1373 for (j
= 1; j
< i
; j
++)
1374 ire_refrele(ires
[j
]);
1378 * Since we needed to allocate but couldn't we need to make
1379 * sure that the dependency chain is rebuilt the next time.
1381 ire_dep_invalidate_generations(ires
[0]);
1382 generation
= IRE_GENERATION_VERIFY
;
1385 * IREs can have been added or deleted while we did the
1386 * recursive lookup and we can't catch those until we've built
1387 * the dependencies. We verify the stored
1388 * ire_dep_parent_generation to catch any such changes and
1389 * return IRE_GENERATION_VERIFY (which will cause
1390 * ip_select_route to be called again so we can redo the
1391 * recursive lookup next time we send a packet.
1393 if (ires
[0]->ire_dep_parent
== NULL
)
1394 generation
= ires
[0]->ire_generation
;
1396 generation
= ire_dep_validate_generations(ires
[0]);
1397 if (generations
[0] != ires
[0]->ire_generation
) {
1398 /* Something changed at the top */
1399 generation
= IRE_GENERATION_VERIFY
;
1402 if (generationp
!= NULL
)
1403 *generationp
= generation
;
/*
 * Public entry point for a full-match recursive IPv6 route lookup.
 * Thin wrapper around ire_route_recursive_impl_v6(): the first argument
 * (the starting IRE) is NULL so the implementation begins with a fresh
 * forwarding-table lookup; every caller-supplied argument is passed
 * straight through unchanged.
 */
ire_t *
ire_route_recursive_v6(const in6_addr_t *nexthop, uint_t ire_type,
    const ill_t *ill, zoneid_t zoneid, uint_t match_args, uint_t irr_flags,
    uint32_t xmit_hint, ip_stack_t *ipst, in6_addr_t *setsrcp,
    uint_t *generationp)
{
	return (ire_route_recursive_impl_v6(NULL, nexthop, ire_type, ill,
	    zoneid, match_args, irr_flags, xmit_hint, ipst, setsrcp,
	    generationp));
}
/*
 * Recursively look for a route to the destination.
 * We only handle a destination match here, yet we have the same arguments
 * as the full match to allow function pointers to select between the two.
 *
 * Note that this function never returns NULL. It returns an IRE_NOROUTE
 * instead.
 *
 * If we find any IRE_LOCAL|IRE_LOOPBACK etc past the first iteration it
 * is an error.
 * Allow at most one RTF_INDIRECT.
 */
1432 ire_route_recursive_dstonly_v6(const in6_addr_t
*nexthop
, uint_t irr_flags
,
1433 uint32_t xmit_hint
, ip_stack_t
*ipst
)
1439 /* ire_ftable_lookup handles round-robin/ECMP */
1440 ire
= ire_ftable_lookup_simple_v6(nexthop
, xmit_hint
, ipst
,
1442 ASSERT(ire
!= NULL
);
1445 * If the IRE has a current cached parent we know that the whole
1446 * parent chain is current, hence we don't need to discover and
1447 * build any dependencies by doing a recursive lookup.
1449 mutex_enter(&ire
->ire_lock
);
1450 if (ire
->ire_dep_parent
!= NULL
) {
1451 if (ire
->ire_dep_parent
->ire_generation
==
1452 ire
->ire_dep_parent_generation
) {
1453 mutex_exit(&ire
->ire_lock
);
1456 mutex_exit(&ire
->ire_lock
);
1458 mutex_exit(&ire
->ire_lock
);
1460 * If this type should have an ire_nce_cache (even if it
1461 * doesn't yet have one) then we are done. Includes
1462 * IRE_INTERFACE with a full 128 bit mask.
1464 if (ire
->ire_nce_capable
)
1469 * Fallback to loop in the normal code starting with the ire
1470 * we found. Normally this would return the same ire.
1472 ire1
= ire_route_recursive_impl_v6(ire
, nexthop
, 0, NULL
, ALL_ZONES
,
1473 MATCH_IRE_DSTONLY
, irr_flags
, xmit_hint
, ipst
, NULL
, &generation
);