2 * Copyright (c) 1980, 1986, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * @(#)if.c 8.3 (Berkeley) 1/4/94
30 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
33 #include "opt_inet6.h"
35 #include "opt_ifpoll.h"
37 #include <sys/param.h>
38 #include <sys/malloc.h>
40 #include <sys/systm.h>
43 #include <sys/protosw.h>
44 #include <sys/socket.h>
45 #include <sys/socketvar.h>
46 #include <sys/socketops.h>
47 #include <sys/kernel.h>
49 #include <sys/mutex.h>
50 #include <sys/sockio.h>
51 #include <sys/syslog.h>
52 #include <sys/sysctl.h>
53 #include <sys/domain.h>
54 #include <sys/thread.h>
55 #include <sys/serialize.h>
58 #include <sys/thread2.h>
59 #include <sys/msgport2.h>
60 #include <sys/mutex2.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_types.h>
66 #include <net/if_var.h>
67 #include <net/if_ringmap.h>
68 #include <net/ifq_var.h>
69 #include <net/radix.h>
70 #include <net/route.h>
71 #include <net/if_clone.h>
72 #include <net/netisr2.h>
73 #include <net/netmsg2.h>
75 #include <machine/atomic.h>
76 #include <machine/stdarg.h>
77 #include <machine/smp.h>
79 #if defined(INET) || defined(INET6)
81 #include <netinet/in.h>
82 #include <netinet/in_var.h>
83 #include <netinet/if_ether.h>
85 #include <netinet6/in6_var.h>
86 #include <netinet6/in6_ifattach.h>
90 struct netmsg_ifaddr
{
91 struct netmsg_base base
;
97 struct ifsubq_stage_head
{
98 TAILQ_HEAD(, ifsubq_stage
) stg_head
;
107 #define RINGMAP_FLAG_NONE 0x0
108 #define RINGMAP_FLAG_POWEROF2 0x1
111 * System initialization
113 static void if_attachdomain(void *);
114 static void if_attachdomain1(struct ifnet
*);
115 static int ifconf(u_long
, caddr_t
, struct ucred
*);
116 static void ifinit(void *);
117 static void ifnetinit(void *);
118 static void if_slowtimo(void *);
119 static void link_rtrequest(int, struct rtentry
*);
120 static int if_rtdel(struct radix_node
*, void *);
121 static void if_slowtimo_dispatch(netmsg_t
);
123 /* Helper functions */
124 static void ifsq_watchdog_reset(struct ifsubq_watchdog
*);
125 static int if_delmulti_serialized(struct ifnet
*, struct sockaddr
*);
126 static struct ifnet_array
*ifnet_array_alloc(int);
127 static void ifnet_array_free(struct ifnet_array
*);
128 static struct ifnet_array
*ifnet_array_add(struct ifnet
*,
129 const struct ifnet_array
*);
130 static struct ifnet_array
*ifnet_array_del(struct ifnet
*,
131 const struct ifnet_array
*);
135 * XXX: declare here to avoid to include many inet6 related files..
136 * should be more generalized?
138 extern void nd6_setmtu(struct ifnet
*);
141 SYSCTL_NODE(_net
, PF_LINK
, link
, CTLFLAG_RW
, 0, "Link layers");
142 SYSCTL_NODE(_net_link
, 0, generic
, CTLFLAG_RW
, 0, "Generic link-management");
143 SYSCTL_NODE(_net_link
, OID_AUTO
, ringmap
, CTLFLAG_RW
, 0, "link ringmap");
145 static int ifsq_stage_cntmax
= 4;
146 TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax
);
147 SYSCTL_INT(_net_link
, OID_AUTO
, stage_cntmax
, CTLFLAG_RW
,
148 &ifsq_stage_cntmax
, 0, "ifq staging packet count max");
150 static int if_stats_compat
= 0;
151 SYSCTL_INT(_net_link
, OID_AUTO
, stats_compat
, CTLFLAG_RW
,
152 &if_stats_compat
, 0, "Compat the old ifnet stats");
154 static int if_ringmap_dumprdr
= 0;
155 SYSCTL_INT(_net_link_ringmap
, OID_AUTO
, dump_rdr
, CTLFLAG_RW
,
156 &if_ringmap_dumprdr
, 0, "dump redirect table");
158 SYSINIT(interfaces
, SI_SUB_PROTO_IF
, SI_ORDER_FIRST
, ifinit
, NULL
);
159 SYSINIT(ifnet
, SI_SUB_PRE_DRIVERS
, SI_ORDER_ANY
, ifnetinit
, NULL
);
161 static if_com_alloc_t
*if_com_alloc
[256];
162 static if_com_free_t
*if_com_free
[256];
164 MALLOC_DEFINE(M_IFADDR
, "ifaddr", "interface address");
165 MALLOC_DEFINE(M_IFMADDR
, "ether_multi", "link-level multicast address");
166 MALLOC_DEFINE(M_IFNET
, "ifnet", "interface structure");
168 int ifqmaxlen
= IFQ_MAXLEN
;
169 struct ifnethead ifnet
= TAILQ_HEAD_INITIALIZER(ifnet
);
171 static struct ifnet_array ifnet_array0
;
172 static struct ifnet_array
*ifnet_array
= &ifnet_array0
;
174 static struct callout if_slowtimo_timer
;
175 static struct netmsg_base if_slowtimo_netmsg
;
178 struct ifnet
**ifindex2ifnet
= NULL
;
179 static struct mtx ifnet_mtx
= MTX_INITIALIZER("ifnet");
181 static struct ifsubq_stage_head ifsubq_stage_heads
[MAXCPU
];
184 #define IFQ_KTR_STRING "ifq=%p"
185 #define IFQ_KTR_ARGS struct ifaltq *ifq
187 #define KTR_IFQ KTR_ALL
189 KTR_INFO_MASTER(ifq
);
190 KTR_INFO(KTR_IFQ
, ifq
, enqueue
, 0, IFQ_KTR_STRING
, IFQ_KTR_ARGS
);
191 KTR_INFO(KTR_IFQ
, ifq
, dequeue
, 1, IFQ_KTR_STRING
, IFQ_KTR_ARGS
);
192 #define logifq(name, arg) KTR_LOG(ifq_ ## name, arg)
194 #define IF_START_KTR_STRING "ifp=%p"
195 #define IF_START_KTR_ARGS struct ifnet *ifp
197 #define KTR_IF_START KTR_ALL
199 KTR_INFO_MASTER(if_start
);
200 KTR_INFO(KTR_IF_START
, if_start
, run
, 0,
201 IF_START_KTR_STRING
, IF_START_KTR_ARGS
);
202 KTR_INFO(KTR_IF_START
, if_start
, sched
, 1,
203 IF_START_KTR_STRING
, IF_START_KTR_ARGS
);
204 KTR_INFO(KTR_IF_START
, if_start
, avoid
, 2,
205 IF_START_KTR_STRING
, IF_START_KTR_ARGS
);
206 KTR_INFO(KTR_IF_START
, if_start
, contend_sched
, 3,
207 IF_START_KTR_STRING
, IF_START_KTR_ARGS
);
208 KTR_INFO(KTR_IF_START
, if_start
, chase_sched
, 4,
209 IF_START_KTR_STRING
, IF_START_KTR_ARGS
);
210 #define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg)
213 TAILQ_HEAD(, ifg_group
) ifg_head
= TAILQ_HEAD_INITIALIZER(ifg_head
);
216 * Network interface utility routines.
218 * Routines with ifa_ifwith* names take sockaddr *'s as
227 callout_init_mp(&if_slowtimo_timer
);
228 netmsg_init(&if_slowtimo_netmsg
, NULL
, &netisr_adone_rport
,
229 MSGF_PRIORITY
, if_slowtimo_dispatch
);
231 /* XXX is this necessary? */
233 TAILQ_FOREACH(ifp
, &ifnetlist
, if_link
) {
234 if (ifp
->if_snd
.altq_maxlen
== 0) {
235 if_printf(ifp
, "XXX: driver didn't set altq_maxlen\n");
236 ifq_set_maxlen(&ifp
->if_snd
, ifqmaxlen
);
241 /* Start if_slowtimo */
242 lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg
.lmsg
);
246 ifsq_ifstart_ipifunc(void *arg
)
248 struct ifaltq_subque
*ifsq
= arg
;
249 struct lwkt_msg
*lmsg
= ifsq_get_ifstart_lmsg(ifsq
, mycpuid
);
252 if (lmsg
->ms_flags
& MSGF_DONE
)
253 lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid
), lmsg
);
258 ifsq_stage_remove(struct ifsubq_stage_head
*head
, struct ifsubq_stage
*stage
)
260 KKASSERT(stage
->stg_flags
& IFSQ_STAGE_FLAG_QUED
);
261 TAILQ_REMOVE(&head
->stg_head
, stage
, stg_link
);
262 stage
->stg_flags
&= ~(IFSQ_STAGE_FLAG_QUED
| IFSQ_STAGE_FLAG_SCHED
);
268 ifsq_stage_insert(struct ifsubq_stage_head
*head
, struct ifsubq_stage
*stage
)
270 KKASSERT((stage
->stg_flags
&
271 (IFSQ_STAGE_FLAG_QUED
| IFSQ_STAGE_FLAG_SCHED
)) == 0);
272 stage
->stg_flags
|= IFSQ_STAGE_FLAG_QUED
;
273 TAILQ_INSERT_TAIL(&head
->stg_head
, stage
, stg_link
);
277 * Schedule ifnet.if_start on the subqueue owner CPU
280 ifsq_ifstart_schedule(struct ifaltq_subque
*ifsq
, int force
)
284 if (!force
&& curthread
->td_type
== TD_TYPE_NETISR
&&
285 ifsq_stage_cntmax
> 0) {
286 struct ifsubq_stage
*stage
= ifsq_get_stage(ifsq
, mycpuid
);
290 if ((stage
->stg_flags
& IFSQ_STAGE_FLAG_QUED
) == 0)
291 ifsq_stage_insert(&ifsubq_stage_heads
[mycpuid
], stage
);
292 stage
->stg_flags
|= IFSQ_STAGE_FLAG_SCHED
;
296 cpu
= ifsq_get_cpuid(ifsq
);
298 lwkt_send_ipiq(globaldata_find(cpu
), ifsq_ifstart_ipifunc
, ifsq
);
300 ifsq_ifstart_ipifunc(ifsq
);
305 * This function will release ifnet.if_start subqueue interlock,
306 * if ifnet.if_start for the subqueue does not need to be scheduled
309 ifsq_ifstart_need_schedule(struct ifaltq_subque
*ifsq
, int running
)
311 if (!running
|| ifsq_is_empty(ifsq
)
313 || ifsq
->ifsq_altq
->altq_tbr
!= NULL
318 * ifnet.if_start subqueue interlock is released, if:
319 * 1) Hardware can not take any packets, due to
320 * o interface is marked down
321 * o hardware queue is full (ifsq_is_oactive)
322 * Under the second situation, hardware interrupt
323 * or polling(4) will call/schedule ifnet.if_start
324 * on the subqueue when hardware queue is ready
325 * 2) There is no packet in the subqueue.
326 * Further ifq_dispatch or ifq_handoff will call/
327 * schedule ifnet.if_start on the subqueue.
328 * 3) TBR is used and it does not allow further
330 * TBR callout will call ifnet.if_start on the
333 if (!running
|| !ifsq_data_ready(ifsq
)) {
334 ifsq_clr_started(ifsq
);
335 ALTQ_SQ_UNLOCK(ifsq
);
338 ALTQ_SQ_UNLOCK(ifsq
);
344 ifsq_ifstart_dispatch(netmsg_t msg
)
346 struct lwkt_msg
*lmsg
= &msg
->base
.lmsg
;
347 struct ifaltq_subque
*ifsq
= lmsg
->u
.ms_resultp
;
348 struct ifnet
*ifp
= ifsq_get_ifp(ifsq
);
349 struct globaldata
*gd
= mycpu
;
350 int running
= 0, need_sched
;
354 lwkt_replymsg(lmsg
, 0); /* reply ASAP */
356 if (gd
->gd_cpuid
!= ifsq_get_cpuid(ifsq
)) {
358 * We need to chase the subqueue owner CPU change.
360 ifsq_ifstart_schedule(ifsq
, 1);
365 ifsq_serialize_hw(ifsq
);
366 if ((ifp
->if_flags
& IFF_RUNNING
) && !ifsq_is_oactive(ifsq
)) {
367 ifp
->if_start(ifp
, ifsq
);
368 if ((ifp
->if_flags
& IFF_RUNNING
) && !ifsq_is_oactive(ifsq
))
371 need_sched
= ifsq_ifstart_need_schedule(ifsq
, running
);
372 ifsq_deserialize_hw(ifsq
);
376 * More data need to be transmitted, ifnet.if_start is
377 * scheduled on the subqueue owner CPU, and we keep going.
378 * NOTE: ifnet.if_start subqueue interlock is not released.
380 ifsq_ifstart_schedule(ifsq
, 0);
386 /* Device driver ifnet.if_start helper function */
388 ifsq_devstart(struct ifaltq_subque
*ifsq
)
390 struct ifnet
*ifp
= ifsq_get_ifp(ifsq
);
393 ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq
);
396 if (ifsq_is_started(ifsq
) || !ifsq_data_ready(ifsq
)) {
397 ALTQ_SQ_UNLOCK(ifsq
);
400 ifsq_set_started(ifsq
);
401 ALTQ_SQ_UNLOCK(ifsq
);
403 ifp
->if_start(ifp
, ifsq
);
405 if ((ifp
->if_flags
& IFF_RUNNING
) && !ifsq_is_oactive(ifsq
))
408 if (ifsq_ifstart_need_schedule(ifsq
, running
)) {
410 * More data need to be transmitted, ifnet.if_start is
411 * scheduled on ifnet's CPU, and we keep going.
412 * NOTE: ifnet.if_start interlock is not released.
414 ifsq_ifstart_schedule(ifsq
, 0);
419 if_devstart(struct ifnet
*ifp
)
421 ifsq_devstart(ifq_get_subq_default(&ifp
->if_snd
));
424 /* Device driver ifnet.if_start schedule helper function */
426 ifsq_devstart_sched(struct ifaltq_subque
*ifsq
)
428 ifsq_ifstart_schedule(ifsq
, 1);
432 if_devstart_sched(struct ifnet
*ifp
)
434 ifsq_devstart_sched(ifq_get_subq_default(&ifp
->if_snd
));
438 if_default_serialize(struct ifnet
*ifp
, enum ifnet_serialize slz __unused
)
440 lwkt_serialize_enter(ifp
->if_serializer
);
444 if_default_deserialize(struct ifnet
*ifp
, enum ifnet_serialize slz __unused
)
446 lwkt_serialize_exit(ifp
->if_serializer
);
450 if_default_tryserialize(struct ifnet
*ifp
, enum ifnet_serialize slz __unused
)
452 return lwkt_serialize_try(ifp
->if_serializer
);
457 if_default_serialize_assert(struct ifnet
*ifp
,
458 enum ifnet_serialize slz __unused
,
459 boolean_t serialized
)
462 ASSERT_SERIALIZED(ifp
->if_serializer
);
464 ASSERT_NOT_SERIALIZED(ifp
->if_serializer
);
469 * Attach an interface to the list of "active" interfaces.
471 * The serializer is optional.
474 if_attach(struct ifnet
*ifp
, lwkt_serialize_t serializer
)
477 int namelen
, masklen
;
478 struct sockaddr_dl
*sdl
, *sdl_addr
;
481 struct ifnet
**old_ifindex2ifnet
= NULL
;
482 struct ifnet_array
*old_ifnet_array
;
485 static int if_indexlim
= 8;
487 if (ifp
->if_serialize
!= NULL
) {
488 KASSERT(ifp
->if_deserialize
!= NULL
&&
489 ifp
->if_tryserialize
!= NULL
&&
490 ifp
->if_serialize_assert
!= NULL
,
491 ("serialize functions are partially setup"));
494 * If the device supplies serialize functions,
495 * then clear if_serializer to catch any invalid
496 * usage of this field.
498 KASSERT(serializer
== NULL
,
499 ("both serialize functions and default serializer "
501 ifp
->if_serializer
= NULL
;
503 KASSERT(ifp
->if_deserialize
== NULL
&&
504 ifp
->if_tryserialize
== NULL
&&
505 ifp
->if_serialize_assert
== NULL
,
506 ("serialize functions are partially setup"));
507 ifp
->if_serialize
= if_default_serialize
;
508 ifp
->if_deserialize
= if_default_deserialize
;
509 ifp
->if_tryserialize
= if_default_tryserialize
;
511 ifp
->if_serialize_assert
= if_default_serialize_assert
;
515 * The serializer can be passed in from the device,
516 * allowing the same serializer to be used for both
517 * the interrupt interlock and the device queue.
518 * If not specified, the netif structure will use an
519 * embedded serializer.
521 if (serializer
== NULL
) {
522 serializer
= &ifp
->if_default_serializer
;
523 lwkt_serialize_init(serializer
);
525 ifp
->if_serializer
= serializer
;
529 * Make if_addrhead available on all CPUs, since they
530 * could be accessed by any threads.
532 ifp
->if_addrheads
= kmalloc(ncpus
* sizeof(struct ifaddrhead
),
533 M_IFADDR
, M_WAITOK
| M_ZERO
);
534 for (i
= 0; i
< ncpus
; ++i
)
535 TAILQ_INIT(&ifp
->if_addrheads
[i
]);
537 TAILQ_INIT(&ifp
->if_multiaddrs
);
538 TAILQ_INIT(&ifp
->if_groups
);
539 getmicrotime(&ifp
->if_lastchange
);
542 * create a Link Level name for this device
544 namelen
= strlen(ifp
->if_xname
);
545 masklen
= offsetof(struct sockaddr_dl
, sdl_data
[0]) + namelen
;
546 socksize
= masklen
+ ifp
->if_addrlen
;
547 if (socksize
< sizeof(*sdl
))
548 socksize
= sizeof(*sdl
);
549 socksize
= RT_ROUNDUP(socksize
);
550 ifa
= ifa_create(sizeof(struct ifaddr
) + 2 * socksize
);
551 sdl
= sdl_addr
= (struct sockaddr_dl
*)(ifa
+ 1);
552 sdl
->sdl_len
= socksize
;
553 sdl
->sdl_family
= AF_LINK
;
554 bcopy(ifp
->if_xname
, sdl
->sdl_data
, namelen
);
555 sdl
->sdl_nlen
= namelen
;
556 sdl
->sdl_type
= ifp
->if_type
;
557 ifp
->if_lladdr
= ifa
;
559 ifa
->ifa_rtrequest
= link_rtrequest
;
560 ifa
->ifa_addr
= (struct sockaddr
*)sdl
;
561 sdl
= (struct sockaddr_dl
*)(socksize
+ (caddr_t
)sdl
);
562 ifa
->ifa_netmask
= (struct sockaddr
*)sdl
;
563 sdl
->sdl_len
= masklen
;
565 sdl
->sdl_data
[--namelen
] = 0xff;
566 ifa_iflink(ifa
, ifp
, 0 /* Insert head */);
569 * Make if_data available on all CPUs, since they could
570 * be updated by hardware interrupt routing, which could
571 * be bound to any CPU.
573 ifp
->if_data_pcpu
= kmalloc_cachealign(
574 ncpus
* sizeof(struct ifdata_pcpu
), M_DEVBUF
, M_WAITOK
| M_ZERO
);
576 if (ifp
->if_mapsubq
== NULL
)
577 ifp
->if_mapsubq
= ifq_mapsubq_default
;
581 ifq
->altq_disc
= NULL
;
582 ifq
->altq_flags
&= ALTQF_CANTCHANGE
;
583 ifq
->altq_tbr
= NULL
;
586 if (ifq
->altq_subq_cnt
<= 0)
587 ifq
->altq_subq_cnt
= 1;
588 ifq
->altq_subq
= kmalloc_cachealign(
589 ifq
->altq_subq_cnt
* sizeof(struct ifaltq_subque
),
590 M_DEVBUF
, M_WAITOK
| M_ZERO
);
592 if (ifq
->altq_maxlen
== 0) {
593 if_printf(ifp
, "driver didn't set altq_maxlen\n");
594 ifq_set_maxlen(ifq
, ifqmaxlen
);
597 for (q
= 0; q
< ifq
->altq_subq_cnt
; ++q
) {
598 struct ifaltq_subque
*ifsq
= &ifq
->altq_subq
[q
];
600 ALTQ_SQ_LOCK_INIT(ifsq
);
601 ifsq
->ifsq_index
= q
;
603 ifsq
->ifsq_altq
= ifq
;
604 ifsq
->ifsq_ifp
= ifp
;
606 ifsq
->ifsq_maxlen
= ifq
->altq_maxlen
;
607 ifsq
->ifsq_maxbcnt
= ifsq
->ifsq_maxlen
* MCLBYTES
;
608 ifsq
->ifsq_prepended
= NULL
;
609 ifsq
->ifsq_started
= 0;
610 ifsq
->ifsq_hw_oactive
= 0;
611 ifsq_set_cpuid(ifsq
, 0);
612 if (ifp
->if_serializer
!= NULL
)
613 ifsq_set_hw_serialize(ifsq
, ifp
->if_serializer
);
615 /* XXX: netisr_ncpus */
617 kmalloc_cachealign(ncpus
* sizeof(struct ifsubq_stage
),
618 M_DEVBUF
, M_WAITOK
| M_ZERO
);
619 for (i
= 0; i
< ncpus
; ++i
)
620 ifsq
->ifsq_stage
[i
].stg_subq
= ifsq
;
623 * Allocate one if_start message for each CPU, since
624 * the hardware TX ring could be assigned to any CPU.
627 * If the hardware TX ring polling CPU and the hardware
628 * TX ring interrupt CPU are same, one if_start message
631 ifsq
->ifsq_ifstart_nmsg
=
632 kmalloc(ncpus
* sizeof(struct netmsg_base
),
633 M_LWKTMSG
, M_WAITOK
);
634 for (i
= 0; i
< ncpus
; ++i
) {
635 netmsg_init(&ifsq
->ifsq_ifstart_nmsg
[i
], NULL
,
636 &netisr_adone_rport
, 0, ifsq_ifstart_dispatch
);
637 ifsq
->ifsq_ifstart_nmsg
[i
].lmsg
.u
.ms_resultp
= ifsq
;
640 ifq_set_classic(ifq
);
643 * Increase mbuf cluster/jcluster limits for the mbufs that
644 * could sit on the device queues for quite some time.
646 if (ifp
->if_nmbclusters
> 0)
647 mcl_inclimit(ifp
->if_nmbclusters
);
648 if (ifp
->if_nmbjclusters
> 0)
649 mjcl_inclimit(ifp
->if_nmbjclusters
);
652 * Install this ifp into ifindex2inet, ifnet queue and ifnet
653 * array after it is setup.
655 * Protect ifindex2ifnet, ifnet queue and ifnet array changes
656 * by ifnet lock, so that non-netisr threads could get a
661 /* Don't update if_index until ifindex2ifnet is setup */
662 ifp
->if_index
= if_index
+ 1;
663 sdl_addr
->sdl_index
= ifp
->if_index
;
666 * Install this ifp into ifindex2ifnet
668 if (ifindex2ifnet
== NULL
|| ifp
->if_index
>= if_indexlim
) {
676 n
= if_indexlim
* sizeof(*q
);
677 q
= kmalloc(n
, M_IFADDR
, M_WAITOK
| M_ZERO
);
678 if (ifindex2ifnet
!= NULL
) {
679 bcopy(ifindex2ifnet
, q
, n
/2);
680 /* Free old ifindex2ifnet after sync all netisrs */
681 old_ifindex2ifnet
= ifindex2ifnet
;
685 ifindex2ifnet
[ifp
->if_index
] = ifp
;
687 * Update if_index after this ifp is installed into ifindex2ifnet,
688 * so that netisrs could get a consistent view of ifindex2ifnet.
691 if_index
= ifp
->if_index
;
694 * Install this ifp into ifnet array.
696 /* Free old ifnet array after sync all netisrs */
697 old_ifnet_array
= ifnet_array
;
698 ifnet_array
= ifnet_array_add(ifp
, old_ifnet_array
);
701 * Install this ifp into ifnet queue.
703 TAILQ_INSERT_TAIL(&ifnetlist
, ifp
, if_link
);
708 * Sync all netisrs so that the old ifindex2ifnet and ifnet array
709 * are no longer accessed and we can free them safely later on.
711 netmsg_service_sync();
712 if (old_ifindex2ifnet
!= NULL
)
713 kfree(old_ifindex2ifnet
, M_IFADDR
);
714 ifnet_array_free(old_ifnet_array
);
716 if (!SLIST_EMPTY(&domains
))
717 if_attachdomain1(ifp
);
719 /* Announce the interface. */
720 EVENTHANDLER_INVOKE(ifnet_attach_event
, ifp
);
721 devctl_notify("IFNET", ifp
->if_xname
, "ATTACH", NULL
);
722 rt_ifannouncemsg(ifp
, IFAN_ARRIVAL
);
726 if_attachdomain(void *dummy
)
731 TAILQ_FOREACH(ifp
, &ifnetlist
, if_list
)
732 if_attachdomain1(ifp
);
735 SYSINIT(domainifattach
, SI_SUB_PROTO_IFATTACHDOMAIN
, SI_ORDER_FIRST
,
736 if_attachdomain
, NULL
);
739 if_attachdomain1(struct ifnet
*ifp
)
745 /* address family dependent data region */
746 bzero(ifp
->if_afdata
, sizeof(ifp
->if_afdata
));
747 SLIST_FOREACH(dp
, &domains
, dom_next
)
748 if (dp
->dom_ifattach
)
749 ifp
->if_afdata
[dp
->dom_family
] =
750 (*dp
->dom_ifattach
)(ifp
);
755 * Purge all addresses whose type is _not_ AF_LINK
758 if_purgeaddrs_nolink_dispatch(netmsg_t nmsg
)
760 struct ifnet
*ifp
= nmsg
->lmsg
.u
.ms_resultp
;
761 struct ifaddr_container
*ifac
, *next
;
766 * The ifaddr processing in the following loop will block,
767 * however, this function is called in netisr0, in which
768 * ifaddr list changes happen, so we don't care about the
769 * blockness of the ifaddr processing here.
771 TAILQ_FOREACH_MUTABLE(ifac
, &ifp
->if_addrheads
[mycpuid
],
773 struct ifaddr
*ifa
= ifac
->ifa
;
776 if (ifa
->ifa_addr
->sa_family
== AF_UNSPEC
)
779 /* Leave link ifaddr as it is */
780 if (ifa
->ifa_addr
->sa_family
== AF_LINK
)
783 /* XXX: Ugly!! ad hoc just for INET */
784 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
785 struct ifaliasreq ifr
;
786 struct sockaddr_in saved_addr
, saved_dst
;
787 #ifdef IFADDR_DEBUG_VERBOSE
790 kprintf("purge in4 addr %p: ", ifa
);
791 for (i
= 0; i
< ncpus
; ++i
) {
793 ifa
->ifa_containers
[i
].ifa_refcnt
);
798 /* Save information for panic. */
799 memcpy(&saved_addr
, ifa
->ifa_addr
, sizeof(saved_addr
));
800 if (ifa
->ifa_dstaddr
!= NULL
) {
801 memcpy(&saved_dst
, ifa
->ifa_dstaddr
,
804 memset(&saved_dst
, 0, sizeof(saved_dst
));
807 bzero(&ifr
, sizeof ifr
);
808 ifr
.ifra_addr
= *ifa
->ifa_addr
;
809 if (ifa
->ifa_dstaddr
)
810 ifr
.ifra_broadaddr
= *ifa
->ifa_dstaddr
;
811 if (in_control(SIOCDIFADDR
, (caddr_t
)&ifr
, ifp
,
815 /* MUST NOT HAPPEN */
816 panic("%s: in_control failed %x, dst %x", ifp
->if_xname
,
817 ntohl(saved_addr
.sin_addr
.s_addr
),
818 ntohl(saved_dst
.sin_addr
.s_addr
));
822 if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
823 #ifdef IFADDR_DEBUG_VERBOSE
826 kprintf("purge in6 addr %p: ", ifa
);
827 for (i
= 0; i
< ncpus
; ++i
) {
829 ifa
->ifa_containers
[i
].ifa_refcnt
);
835 /* ifp_addrhead is already updated */
839 if_printf(ifp
, "destroy ifaddr family %d\n",
840 ifa
->ifa_addr
->sa_family
);
841 ifa_ifunlink(ifa
, ifp
);
845 netisr_replymsg(&nmsg
->base
, 0);
849 if_purgeaddrs_nolink(struct ifnet
*ifp
)
851 struct netmsg_base nmsg
;
853 netmsg_init(&nmsg
, NULL
, &curthread
->td_msgport
, 0,
854 if_purgeaddrs_nolink_dispatch
);
855 nmsg
.lmsg
.u
.ms_resultp
= ifp
;
856 netisr_domsg(&nmsg
, 0);
860 ifq_stage_detach_handler(netmsg_t nmsg
)
862 struct ifaltq
*ifq
= nmsg
->lmsg
.u
.ms_resultp
;
865 for (q
= 0; q
< ifq
->altq_subq_cnt
; ++q
) {
866 struct ifaltq_subque
*ifsq
= &ifq
->altq_subq
[q
];
867 struct ifsubq_stage
*stage
= ifsq_get_stage(ifsq
, mycpuid
);
869 if (stage
->stg_flags
& IFSQ_STAGE_FLAG_QUED
)
870 ifsq_stage_remove(&ifsubq_stage_heads
[mycpuid
], stage
);
872 lwkt_replymsg(&nmsg
->lmsg
, 0);
876 ifq_stage_detach(struct ifaltq
*ifq
)
878 struct netmsg_base base
;
881 netmsg_init(&base
, NULL
, &curthread
->td_msgport
, 0,
882 ifq_stage_detach_handler
);
883 base
.lmsg
.u
.ms_resultp
= ifq
;
885 /* XXX netisr_ncpus */
886 for (cpu
= 0; cpu
< ncpus
; ++cpu
)
887 lwkt_domsg(netisr_cpuport(cpu
), &base
.lmsg
, 0);
890 struct netmsg_if_rtdel
{
891 struct netmsg_base base
;
896 if_rtdel_dispatch(netmsg_t msg
)
898 struct netmsg_if_rtdel
*rmsg
= (void *)msg
;
902 ASSERT_NETISR_NCPUS(cpu
);
904 for (i
= 1; i
<= AF_MAX
; i
++) {
905 struct radix_node_head
*rnh
;
907 if ((rnh
= rt_tables
[cpu
][i
]) == NULL
)
909 rnh
->rnh_walktree(rnh
, if_rtdel
, rmsg
->ifp
);
911 netisr_forwardmsg(&msg
->base
, cpu
+ 1);
915 * Detach an interface, removing it from the
916 * list of "active" interfaces.
919 if_detach(struct ifnet
*ifp
)
921 struct ifnet_array
*old_ifnet_array
;
922 struct netmsg_if_rtdel msg
;
926 /* Announce that the interface is gone. */
927 EVENTHANDLER_INVOKE(ifnet_detach_event
, ifp
);
928 rt_ifannouncemsg(ifp
, IFAN_DEPARTURE
);
929 devctl_notify("IFNET", ifp
->if_xname
, "DETACH", NULL
);
932 * Remove this ifp from ifindex2inet, ifnet queue and ifnet
933 * array before it is whacked.
935 * Protect ifindex2ifnet, ifnet queue and ifnet array changes
936 * by ifnet lock, so that non-netisr threads could get a
942 * Remove this ifp from ifindex2ifnet and maybe decrement if_index.
944 ifindex2ifnet
[ifp
->if_index
] = NULL
;
945 while (if_index
> 0 && ifindex2ifnet
[if_index
] == NULL
)
949 * Remove this ifp from ifnet queue.
951 TAILQ_REMOVE(&ifnetlist
, ifp
, if_link
);
954 * Remove this ifp from ifnet array.
956 /* Free old ifnet array after sync all netisrs */
957 old_ifnet_array
= ifnet_array
;
958 ifnet_array
= ifnet_array_del(ifp
, old_ifnet_array
);
963 * Sync all netisrs so that the old ifnet array is no longer
964 * accessed and we can free it safely later on.
966 netmsg_service_sync();
967 ifnet_array_free(old_ifnet_array
);
970 * Remove routes and flush queues.
974 if (ifp
->if_flags
& IFF_NPOLLING
)
975 ifpoll_deregister(ifp
);
979 /* Decrease the mbuf clusters/jclusters limits increased by us */
980 if (ifp
->if_nmbclusters
> 0)
981 mcl_inclimit(-ifp
->if_nmbclusters
);
982 if (ifp
->if_nmbjclusters
> 0)
983 mjcl_inclimit(-ifp
->if_nmbjclusters
);
986 if (ifq_is_enabled(&ifp
->if_snd
))
987 altq_disable(&ifp
->if_snd
);
988 if (ifq_is_attached(&ifp
->if_snd
))
989 altq_detach(&ifp
->if_snd
);
993 * Clean up all addresses.
995 ifp
->if_lladdr
= NULL
;
997 if_purgeaddrs_nolink(ifp
);
998 if (!TAILQ_EMPTY(&ifp
->if_addrheads
[mycpuid
])) {
1001 ifa
= TAILQ_FIRST(&ifp
->if_addrheads
[mycpuid
])->ifa
;
1002 KASSERT(ifa
->ifa_addr
->sa_family
== AF_LINK
,
1003 ("non-link ifaddr is left on if_addrheads"));
1005 ifa_ifunlink(ifa
, ifp
);
1007 KASSERT(TAILQ_EMPTY(&ifp
->if_addrheads
[mycpuid
]),
1008 ("there are still ifaddrs left on if_addrheads"));
1013 * Remove all IPv4 kernel structures related to ifp.
1020 * Remove all IPv6 kernel structs related to ifp. This should be done
1021 * before removing routing entries below, since IPv6 interface direct
1022 * routes are expected to be removed by the IPv6-specific kernel API.
1023 * Otherwise, the kernel will detect some inconsistency and bark it.
1029 * Delete all remaining routes using this interface
1031 netmsg_init(&msg
.base
, NULL
, &curthread
->td_msgport
, MSGF_PRIORITY
,
1034 netisr_domsg_global(&msg
.base
);
1036 SLIST_FOREACH(dp
, &domains
, dom_next
)
1037 if (dp
->dom_ifdetach
&& ifp
->if_afdata
[dp
->dom_family
])
1038 (*dp
->dom_ifdetach
)(ifp
,
1039 ifp
->if_afdata
[dp
->dom_family
]);
1041 kfree(ifp
->if_addrheads
, M_IFADDR
);
1043 lwkt_synchronize_ipiqs("if_detach");
1044 ifq_stage_detach(&ifp
->if_snd
);
1046 for (q
= 0; q
< ifp
->if_snd
.altq_subq_cnt
; ++q
) {
1047 struct ifaltq_subque
*ifsq
= &ifp
->if_snd
.altq_subq
[q
];
1049 kfree(ifsq
->ifsq_ifstart_nmsg
, M_LWKTMSG
);
1050 kfree(ifsq
->ifsq_stage
, M_DEVBUF
);
1052 kfree(ifp
->if_snd
.altq_subq
, M_DEVBUF
);
1054 kfree(ifp
->if_data_pcpu
, M_DEVBUF
);
1060 * Create interface group without members
1063 if_creategroup(const char *groupname
)
1065 struct ifg_group
*ifg
= NULL
;
1067 if ((ifg
= (struct ifg_group
*)kmalloc(sizeof(struct ifg_group
),
1068 M_TEMP
, M_NOWAIT
)) == NULL
)
1071 strlcpy(ifg
->ifg_group
, groupname
, sizeof(ifg
->ifg_group
));
1072 ifg
->ifg_refcnt
= 0;
1073 ifg
->ifg_carp_demoted
= 0;
1074 TAILQ_INIT(&ifg
->ifg_members
);
1076 pfi_attach_ifgroup(ifg
);
1078 TAILQ_INSERT_TAIL(&ifg_head
, ifg
, ifg_next
);
1084 * Add a group to an interface
1087 if_addgroup(struct ifnet
*ifp
, const char *groupname
)
1089 struct ifg_list
*ifgl
;
1090 struct ifg_group
*ifg
= NULL
;
1091 struct ifg_member
*ifgm
;
1093 if (groupname
[0] && groupname
[strlen(groupname
) - 1] >= '0' &&
1094 groupname
[strlen(groupname
) - 1] <= '9')
1097 TAILQ_FOREACH(ifgl
, &ifp
->if_groups
, ifgl_next
)
1098 if (!strcmp(ifgl
->ifgl_group
->ifg_group
, groupname
))
1101 if ((ifgl
= kmalloc(sizeof(*ifgl
), M_TEMP
, M_NOWAIT
)) == NULL
)
1104 if ((ifgm
= kmalloc(sizeof(*ifgm
), M_TEMP
, M_NOWAIT
)) == NULL
) {
1105 kfree(ifgl
, M_TEMP
);
1109 TAILQ_FOREACH(ifg
, &ifg_head
, ifg_next
)
1110 if (!strcmp(ifg
->ifg_group
, groupname
))
1113 if (ifg
== NULL
&& (ifg
= if_creategroup(groupname
)) == NULL
) {
1114 kfree(ifgl
, M_TEMP
);
1115 kfree(ifgm
, M_TEMP
);
1120 ifgl
->ifgl_group
= ifg
;
1121 ifgm
->ifgm_ifp
= ifp
;
1123 TAILQ_INSERT_TAIL(&ifg
->ifg_members
, ifgm
, ifgm_next
);
1124 TAILQ_INSERT_TAIL(&ifp
->if_groups
, ifgl
, ifgl_next
);
1127 pfi_group_change(groupname
);
1134 * Remove a group from an interface
1137 if_delgroup(struct ifnet
*ifp
, const char *groupname
)
1139 struct ifg_list
*ifgl
;
1140 struct ifg_member
*ifgm
;
1142 TAILQ_FOREACH(ifgl
, &ifp
->if_groups
, ifgl_next
)
1143 if (!strcmp(ifgl
->ifgl_group
->ifg_group
, groupname
))
1148 TAILQ_REMOVE(&ifp
->if_groups
, ifgl
, ifgl_next
);
1150 TAILQ_FOREACH(ifgm
, &ifgl
->ifgl_group
->ifg_members
, ifgm_next
)
1151 if (ifgm
->ifgm_ifp
== ifp
)
1155 TAILQ_REMOVE(&ifgl
->ifgl_group
->ifg_members
, ifgm
, ifgm_next
);
1156 kfree(ifgm
, M_TEMP
);
1159 if (--ifgl
->ifgl_group
->ifg_refcnt
== 0) {
1160 TAILQ_REMOVE(&ifg_head
, ifgl
->ifgl_group
, ifg_next
);
1162 pfi_detach_ifgroup(ifgl
->ifgl_group
);
1164 kfree(ifgl
->ifgl_group
, M_TEMP
);
1167 kfree(ifgl
, M_TEMP
);
1170 pfi_group_change(groupname
);
1177 * Stores all groups from an interface in memory pointed
1181 if_getgroup(caddr_t data
, struct ifnet
*ifp
)
1184 struct ifg_list
*ifgl
;
1185 struct ifg_req ifgrq
, *ifgp
;
1186 struct ifgroupreq
*ifgr
= (struct ifgroupreq
*)data
;
1188 if (ifgr
->ifgr_len
== 0) {
1189 TAILQ_FOREACH(ifgl
, &ifp
->if_groups
, ifgl_next
)
1190 ifgr
->ifgr_len
+= sizeof(struct ifg_req
);
1194 len
= ifgr
->ifgr_len
;
1195 ifgp
= ifgr
->ifgr_groups
;
1196 TAILQ_FOREACH(ifgl
, &ifp
->if_groups
, ifgl_next
) {
1197 if (len
< sizeof(ifgrq
))
1199 bzero(&ifgrq
, sizeof ifgrq
);
1200 strlcpy(ifgrq
.ifgrq_group
, ifgl
->ifgl_group
->ifg_group
,
1201 sizeof(ifgrq
.ifgrq_group
));
1202 if ((error
= copyout((caddr_t
)&ifgrq
, (caddr_t
)ifgp
,
1203 sizeof(struct ifg_req
))))
1205 len
-= sizeof(ifgrq
);
1213 * Stores all members of a group in memory pointed to by data
1216 if_getgroupmembers(caddr_t data
)
1218 struct ifgroupreq
*ifgr
= (struct ifgroupreq
*)data
;
1219 struct ifg_group
*ifg
;
1220 struct ifg_member
*ifgm
;
1221 struct ifg_req ifgrq
, *ifgp
;
1224 TAILQ_FOREACH(ifg
, &ifg_head
, ifg_next
)
1225 if (!strcmp(ifg
->ifg_group
, ifgr
->ifgr_name
))
1230 if (ifgr
->ifgr_len
== 0) {
1231 TAILQ_FOREACH(ifgm
, &ifg
->ifg_members
, ifgm_next
)
1232 ifgr
->ifgr_len
+= sizeof(ifgrq
);
1236 len
= ifgr
->ifgr_len
;
1237 ifgp
= ifgr
->ifgr_groups
;
1238 TAILQ_FOREACH(ifgm
, &ifg
->ifg_members
, ifgm_next
) {
1239 if (len
< sizeof(ifgrq
))
1241 bzero(&ifgrq
, sizeof ifgrq
);
1242 strlcpy(ifgrq
.ifgrq_member
, ifgm
->ifgm_ifp
->if_xname
,
1243 sizeof(ifgrq
.ifgrq_member
));
1244 if ((error
= copyout((caddr_t
)&ifgrq
, (caddr_t
)ifgp
,
1245 sizeof(struct ifg_req
))))
1247 len
-= sizeof(ifgrq
);
1255 * Delete Routes for a Network Interface
1257 * Called for each routing entry via the rnh->rnh_walktree() call above
1258 * to delete all route entries referencing a detaching network interface.
1261 * rn pointer to node in the routing table
1262 * arg argument passed to rnh->rnh_walktree() - detaching interface
1266 * errno failed - reason indicated
1270 if_rtdel(struct radix_node
*rn
, void *arg
)
1272 struct rtentry
*rt
= (struct rtentry
*)rn
;
1273 struct ifnet
*ifp
= arg
;
1276 if (rt
->rt_ifp
== ifp
) {
1279 * Protect (sorta) against walktree recursion problems
1280 * with cloned routes
1282 if (!(rt
->rt_flags
& RTF_UP
))
1285 err
= rtrequest(RTM_DELETE
, rt_key(rt
), rt
->rt_gateway
,
1286 rt_mask(rt
), rt
->rt_flags
,
1289 log(LOG_WARNING
, "if_rtdel: error %d\n", err
);
1296 static __inline boolean_t
1297 ifa_prefer(const struct ifaddr
*cur_ifa
, const struct ifaddr
*old_ifa
)
1299 if (old_ifa
== NULL
)
1302 if ((old_ifa
->ifa_ifp
->if_flags
& IFF_UP
) == 0 &&
1303 (cur_ifa
->ifa_ifp
->if_flags
& IFF_UP
))
1305 if ((old_ifa
->ifa_flags
& IFA_ROUTE
) == 0 &&
1306 (cur_ifa
->ifa_flags
& IFA_ROUTE
))
1312 * Locate an interface based on a complete address.
1315 ifa_ifwithaddr(struct sockaddr
*addr
)
1317 const struct ifnet_array
*arr
;
1320 arr
= ifnet_array_get();
1321 for (i
= 0; i
< arr
->ifnet_count
; ++i
) {
1322 struct ifnet
*ifp
= arr
->ifnet_arr
[i
];
1323 struct ifaddr_container
*ifac
;
1325 TAILQ_FOREACH(ifac
, &ifp
->if_addrheads
[mycpuid
], ifa_link
) {
1326 struct ifaddr
*ifa
= ifac
->ifa
;
1328 if (ifa
->ifa_addr
->sa_family
!= addr
->sa_family
)
1330 if (sa_equal(addr
, ifa
->ifa_addr
))
1332 if ((ifp
->if_flags
& IFF_BROADCAST
) &&
1333 ifa
->ifa_broadaddr
&&
1334 /* IPv6 doesn't have broadcast */
1335 ifa
->ifa_broadaddr
->sa_len
!= 0 &&
1336 sa_equal(ifa
->ifa_broadaddr
, addr
))
1344 * Locate the point to point interface with a given destination address.
1347 ifa_ifwithdstaddr(struct sockaddr
*addr
)
1349 const struct ifnet_array
*arr
;
1352 arr
= ifnet_array_get();
1353 for (i
= 0; i
< arr
->ifnet_count
; ++i
) {
1354 struct ifnet
*ifp
= arr
->ifnet_arr
[i
];
1355 struct ifaddr_container
*ifac
;
1357 if (!(ifp
->if_flags
& IFF_POINTOPOINT
))
1360 TAILQ_FOREACH(ifac
, &ifp
->if_addrheads
[mycpuid
], ifa_link
) {
1361 struct ifaddr
*ifa
= ifac
->ifa
;
1363 if (ifa
->ifa_addr
->sa_family
!= addr
->sa_family
)
1365 if (ifa
->ifa_dstaddr
&&
1366 sa_equal(addr
, ifa
->ifa_dstaddr
))
1374 * Find an interface on a specific network. If many, choice
1375 * is most specific found.
1378 ifa_ifwithnet(struct sockaddr
*addr
)
1380 struct ifaddr
*ifa_maybe
= NULL
;
1381 u_int af
= addr
->sa_family
;
1382 char *addr_data
= addr
->sa_data
, *cplim
;
1383 const struct ifnet_array
*arr
;
1387 * AF_LINK addresses can be looked up directly by their index number,
1388 * so do that if we can.
1390 if (af
== AF_LINK
) {
1391 struct sockaddr_dl
*sdl
= (struct sockaddr_dl
*)addr
;
1393 if (sdl
->sdl_index
&& sdl
->sdl_index
<= if_index
)
1394 return (ifindex2ifnet
[sdl
->sdl_index
]->if_lladdr
);
1398 * Scan though each interface, looking for ones that have
1399 * addresses in this address family.
1401 arr
= ifnet_array_get();
1402 for (i
= 0; i
< arr
->ifnet_count
; ++i
) {
1403 struct ifnet
*ifp
= arr
->ifnet_arr
[i
];
1404 struct ifaddr_container
*ifac
;
1406 TAILQ_FOREACH(ifac
, &ifp
->if_addrheads
[mycpuid
], ifa_link
) {
1407 struct ifaddr
*ifa
= ifac
->ifa
;
1408 char *cp
, *cp2
, *cp3
;
1410 if (ifa
->ifa_addr
->sa_family
!= af
)
1412 if (af
== AF_INET
&& ifp
->if_flags
& IFF_POINTOPOINT
) {
1414 * This is a bit broken as it doesn't
1415 * take into account that the remote end may
1416 * be a single node in the network we are
1418 * The trouble is that we don't know the
1419 * netmask for the remote end.
1421 if (ifa
->ifa_dstaddr
!= NULL
&&
1422 sa_equal(addr
, ifa
->ifa_dstaddr
))
1426 * if we have a special address handler,
1427 * then use it instead of the generic one.
1429 if (ifa
->ifa_claim_addr
) {
1430 if ((*ifa
->ifa_claim_addr
)(ifa
, addr
)) {
1438 * Scan all the bits in the ifa's address.
1439 * If a bit dissagrees with what we are
1440 * looking for, mask it with the netmask
1441 * to see if it really matters.
1442 * (A byte at a time)
1444 if (ifa
->ifa_netmask
== 0)
1447 cp2
= ifa
->ifa_addr
->sa_data
;
1448 cp3
= ifa
->ifa_netmask
->sa_data
;
1449 cplim
= ifa
->ifa_netmask
->sa_len
+
1450 (char *)ifa
->ifa_netmask
;
1452 if ((*cp
++ ^ *cp2
++) & *cp3
++)
1453 goto next
; /* next address! */
1455 * If the netmask of what we just found
1456 * is more specific than what we had before
1457 * (if we had one) then remember the new one
1458 * before continuing to search for an even
1459 * better one. If the netmasks are equal,
1460 * we prefer the this ifa based on the result
1463 if (ifa_maybe
== NULL
||
1464 rn_refines((char *)ifa
->ifa_netmask
,
1465 (char *)ifa_maybe
->ifa_netmask
) ||
1466 (sa_equal(ifa_maybe
->ifa_netmask
,
1467 ifa
->ifa_netmask
) &&
1468 ifa_prefer(ifa
, ifa_maybe
)))
1477 * Find an interface address specific to an interface best matching
1481 ifaof_ifpforaddr(struct sockaddr
*addr
, struct ifnet
*ifp
)
1483 struct ifaddr_container
*ifac
;
1484 char *cp
, *cp2
, *cp3
;
1486 struct ifaddr
*ifa_maybe
= NULL
;
1487 u_int af
= addr
->sa_family
;
1491 TAILQ_FOREACH(ifac
, &ifp
->if_addrheads
[mycpuid
], ifa_link
) {
1492 struct ifaddr
*ifa
= ifac
->ifa
;
1494 if (ifa
->ifa_addr
->sa_family
!= af
)
1496 if (ifa_maybe
== NULL
)
1498 if (ifa
->ifa_netmask
== NULL
) {
1499 if (sa_equal(addr
, ifa
->ifa_addr
) ||
1500 (ifa
->ifa_dstaddr
!= NULL
&&
1501 sa_equal(addr
, ifa
->ifa_dstaddr
)))
1505 if (ifp
->if_flags
& IFF_POINTOPOINT
) {
1506 if (sa_equal(addr
, ifa
->ifa_dstaddr
))
1510 cp2
= ifa
->ifa_addr
->sa_data
;
1511 cp3
= ifa
->ifa_netmask
->sa_data
;
1512 cplim
= ifa
->ifa_netmask
->sa_len
+ (char *)ifa
->ifa_netmask
;
1513 for (; cp3
< cplim
; cp3
++)
1514 if ((*cp
++ ^ *cp2
++) & *cp3
)
1524 * Default action when installing a route with a Link Level gateway.
1525 * Lookup an appropriate real ifa to point to.
1526 * This should be moved to /sys/net/link.c eventually.
1529 link_rtrequest(int cmd
, struct rtentry
*rt
)
1532 struct sockaddr
*dst
;
1535 if (cmd
!= RTM_ADD
|| (ifa
= rt
->rt_ifa
) == NULL
||
1536 (ifp
= ifa
->ifa_ifp
) == NULL
|| (dst
= rt_key(rt
)) == NULL
)
1538 ifa
= ifaof_ifpforaddr(dst
, ifp
);
1540 IFAFREE(rt
->rt_ifa
);
1543 if (ifa
->ifa_rtrequest
&& ifa
->ifa_rtrequest
!= link_rtrequest
)
1544 ifa
->ifa_rtrequest(cmd
, rt
);
1548 struct netmsg_ifroute
{
1549 struct netmsg_base base
;
1556 * Mark an interface down and notify protocols of the transition.
1559 if_unroute_dispatch(netmsg_t nmsg
)
1561 struct netmsg_ifroute
*msg
= (struct netmsg_ifroute
*)nmsg
;
1562 struct ifnet
*ifp
= msg
->ifp
;
1563 int flag
= msg
->flag
, fam
= msg
->fam
;
1564 struct ifaddr_container
*ifac
;
1568 ifp
->if_flags
&= ~flag
;
1569 getmicrotime(&ifp
->if_lastchange
);
1571 * The ifaddr processing in the following loop will block,
1572 * however, this function is called in netisr0, in which
1573 * ifaddr list changes happen, so we don't care about the
1574 * blockness of the ifaddr processing here.
1576 TAILQ_FOREACH(ifac
, &ifp
->if_addrheads
[mycpuid
], ifa_link
) {
1577 struct ifaddr
*ifa
= ifac
->ifa
;
1580 if (ifa
->ifa_addr
->sa_family
== AF_UNSPEC
)
1583 if (fam
== PF_UNSPEC
|| (fam
== ifa
->ifa_addr
->sa_family
))
1584 kpfctlinput(PRC_IFDOWN
, ifa
->ifa_addr
);
1586 ifq_purge_all(&ifp
->if_snd
);
1589 netisr_replymsg(&nmsg
->base
, 0);
1593 if_unroute(struct ifnet
*ifp
, int flag
, int fam
)
1595 struct netmsg_ifroute msg
;
1597 netmsg_init(&msg
.base
, NULL
, &curthread
->td_msgport
, 0,
1598 if_unroute_dispatch
);
1602 netisr_domsg(&msg
.base
, 0);
1606 * Mark an interface up and notify protocols of the transition.
1609 if_route_dispatch(netmsg_t nmsg
)
1611 struct netmsg_ifroute
*msg
= (struct netmsg_ifroute
*)nmsg
;
1612 struct ifnet
*ifp
= msg
->ifp
;
1613 int flag
= msg
->flag
, fam
= msg
->fam
;
1614 struct ifaddr_container
*ifac
;
1618 ifq_purge_all(&ifp
->if_snd
);
1619 ifp
->if_flags
|= flag
;
1620 getmicrotime(&ifp
->if_lastchange
);
1622 * The ifaddr processing in the following loop will block,
1623 * however, this function is called in netisr0, in which
1624 * ifaddr list changes happen, so we don't care about the
1625 * blockness of the ifaddr processing here.
1627 TAILQ_FOREACH(ifac
, &ifp
->if_addrheads
[mycpuid
], ifa_link
) {
1628 struct ifaddr
*ifa
= ifac
->ifa
;
1631 if (ifa
->ifa_addr
->sa_family
== AF_UNSPEC
)
1634 if (fam
== PF_UNSPEC
|| (fam
== ifa
->ifa_addr
->sa_family
))
1635 kpfctlinput(PRC_IFUP
, ifa
->ifa_addr
);
1642 netisr_replymsg(&nmsg
->base
, 0);
1646 if_route(struct ifnet
*ifp
, int flag
, int fam
)
1648 struct netmsg_ifroute msg
;
1650 netmsg_init(&msg
.base
, NULL
, &curthread
->td_msgport
, 0,
1655 netisr_domsg(&msg
.base
, 0);
1659 * Mark an interface down and notify protocols of the transition. An
1660 * interface going down is also considered to be a synchronizing event.
1661 * We must ensure that all packet processing related to the interface
1662 * has completed before we return so e.g. the caller can free the ifnet
1663 * structure that the mbufs may be referencing.
1665 * NOTE: must be called at splnet or eqivalent.
1668 if_down(struct ifnet
*ifp
)
1670 if_unroute(ifp
, IFF_UP
, AF_UNSPEC
);
1671 netmsg_service_sync();
1675 * Mark an interface up and notify protocols of
1677 * NOTE: must be called at splnet or eqivalent.
1680 if_up(struct ifnet
*ifp
)
1682 if_route(ifp
, IFF_UP
, AF_UNSPEC
);
1686 * Process a link state change.
1687 * NOTE: must be called at splsoftnet or equivalent.
1690 if_link_state_change(struct ifnet
*ifp
)
1692 int link_state
= ifp
->if_link_state
;
1695 devctl_notify("IFNET", ifp
->if_xname
,
1696 (link_state
== LINK_STATE_UP
) ? "LINK_UP" : "LINK_DOWN", NULL
);
1700 * Handle interface watchdog timer routines. Called
1701 * from softclock, we decrement timers (if set) and
1702 * call the appropriate interface routine on expiration.
1705 if_slowtimo_dispatch(netmsg_t nmsg
)
1707 struct globaldata
*gd
= mycpu
;
1708 const struct ifnet_array
*arr
;
1714 lwkt_replymsg(&nmsg
->lmsg
, 0); /* reply ASAP */
1717 arr
= ifnet_array_get();
1718 for (i
= 0; i
< arr
->ifnet_count
; ++i
) {
1719 struct ifnet
*ifp
= arr
->ifnet_arr
[i
];
1723 if (if_stats_compat
) {
1724 IFNET_STAT_GET(ifp
, ipackets
, ifp
->if_ipackets
);
1725 IFNET_STAT_GET(ifp
, ierrors
, ifp
->if_ierrors
);
1726 IFNET_STAT_GET(ifp
, opackets
, ifp
->if_opackets
);
1727 IFNET_STAT_GET(ifp
, oerrors
, ifp
->if_oerrors
);
1728 IFNET_STAT_GET(ifp
, collisions
, ifp
->if_collisions
);
1729 IFNET_STAT_GET(ifp
, ibytes
, ifp
->if_ibytes
);
1730 IFNET_STAT_GET(ifp
, obytes
, ifp
->if_obytes
);
1731 IFNET_STAT_GET(ifp
, imcasts
, ifp
->if_imcasts
);
1732 IFNET_STAT_GET(ifp
, omcasts
, ifp
->if_omcasts
);
1733 IFNET_STAT_GET(ifp
, iqdrops
, ifp
->if_iqdrops
);
1734 IFNET_STAT_GET(ifp
, noproto
, ifp
->if_noproto
);
1735 IFNET_STAT_GET(ifp
, oqdrops
, ifp
->if_oqdrops
);
1738 if (ifp
->if_timer
== 0 || --ifp
->if_timer
) {
1742 if (ifp
->if_watchdog
) {
1743 if (ifnet_tryserialize_all(ifp
)) {
1744 (*ifp
->if_watchdog
)(ifp
);
1745 ifnet_deserialize_all(ifp
);
1747 /* try again next timeout */
1755 callout_reset(&if_slowtimo_timer
, hz
/ IFNET_SLOWHZ
, if_slowtimo
, NULL
);
1759 if_slowtimo(void *arg __unused
)
1761 struct lwkt_msg
*lmsg
= &if_slowtimo_netmsg
.lmsg
;
1763 KASSERT(mycpuid
== 0, ("not on cpu0"));
1765 if (lmsg
->ms_flags
& MSGF_DONE
)
1766 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg
);
1771 * Map interface name to
1772 * interface structure pointer.
1775 ifunit(const char *name
)
1780 * Search all the interfaces for this name/number
1782 KASSERT(mtx_owned(&ifnet_mtx
), ("ifnet is not locked"));
1784 TAILQ_FOREACH(ifp
, &ifnetlist
, if_link
) {
1785 if (strncmp(ifp
->if_xname
, name
, IFNAMSIZ
) == 0)
1792 ifunit_netisr(const char *name
)
1794 const struct ifnet_array
*arr
;
1798 * Search all the interfaces for this name/number
1801 arr
= ifnet_array_get();
1802 for (i
= 0; i
< arr
->ifnet_count
; ++i
) {
1803 struct ifnet
*ifp
= arr
->ifnet_arr
[i
];
1805 if (strncmp(ifp
->if_xname
, name
, IFNAMSIZ
) == 0)
1815 ifioctl(struct socket
*so
, u_long cmd
, caddr_t data
, struct ucred
*cred
)
1820 int error
, do_ifup
= 0;
1823 size_t namelen
, onamelen
;
1824 char new_name
[IFNAMSIZ
];
1826 struct sockaddr_dl
*sdl
;
1831 return (ifconf(cmd
, data
, cred
));
1836 ifr
= (struct ifreq
*)data
;
1841 if ((error
= priv_check_cred(cred
, PRIV_ROOT
, 0)) != 0)
1843 return (if_clone_create(ifr
->ifr_name
, sizeof(ifr
->ifr_name
),
1844 cmd
== SIOCIFCREATE2
? ifr
->ifr_data
: NULL
));
1846 if ((error
= priv_check_cred(cred
, PRIV_ROOT
, 0)) != 0)
1848 return (if_clone_destroy(ifr
->ifr_name
));
1849 case SIOCIFGCLONERS
:
1850 return (if_clone_list((struct if_clonereq
*)data
));
1856 * Nominal ioctl through interface, lookup the ifp and obtain a
1857 * lock to serialize the ifconfig ioctl operation.
1861 ifp
= ifunit(ifr
->ifr_name
);
1870 ifr
->ifr_index
= ifp
->if_index
;
1874 ifr
->ifr_flags
= ifp
->if_flags
;
1875 ifr
->ifr_flagshigh
= ifp
->if_flags
>> 16;
1879 ifr
->ifr_reqcap
= ifp
->if_capabilities
;
1880 ifr
->ifr_curcap
= ifp
->if_capenable
;
1884 ifr
->ifr_metric
= ifp
->if_metric
;
1888 ifr
->ifr_mtu
= ifp
->if_mtu
;
1892 ifr
->ifr_tsolen
= ifp
->if_tsolen
;
1896 error
= copyout((caddr_t
)&ifp
->if_data
, ifr
->ifr_data
,
1897 sizeof(ifp
->if_data
));
1901 ifr
->ifr_phys
= ifp
->if_physical
;
1904 case SIOCGIFPOLLCPU
:
1905 ifr
->ifr_pollcpu
= -1;
1908 case SIOCSIFPOLLCPU
:
1912 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
1915 new_flags
= (ifr
->ifr_flags
& 0xffff) |
1916 (ifr
->ifr_flagshigh
<< 16);
1917 if (ifp
->if_flags
& IFF_SMART
) {
1918 /* Smart drivers twiddle their own routes */
1919 } else if (ifp
->if_flags
& IFF_UP
&&
1920 (new_flags
& IFF_UP
) == 0) {
1922 } else if (new_flags
& IFF_UP
&&
1923 (ifp
->if_flags
& IFF_UP
) == 0) {
1927 #ifdef IFPOLL_ENABLE
1928 if ((new_flags
^ ifp
->if_flags
) & IFF_NPOLLING
) {
1929 if (new_flags
& IFF_NPOLLING
)
1930 ifpoll_register(ifp
);
1932 ifpoll_deregister(ifp
);
1936 ifp
->if_flags
= (ifp
->if_flags
& IFF_CANTCHANGE
) |
1937 (new_flags
&~ IFF_CANTCHANGE
);
1938 if (new_flags
& IFF_PPROMISC
) {
1939 /* Permanently promiscuous mode requested */
1940 ifp
->if_flags
|= IFF_PROMISC
;
1941 } else if (ifp
->if_pcount
== 0) {
1942 ifp
->if_flags
&= ~IFF_PROMISC
;
1944 if (ifp
->if_ioctl
) {
1945 ifnet_serialize_all(ifp
);
1946 ifp
->if_ioctl(ifp
, cmd
, data
, cred
);
1947 ifnet_deserialize_all(ifp
);
1951 getmicrotime(&ifp
->if_lastchange
);
1955 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
1958 if (ifr
->ifr_reqcap
& ~ifp
->if_capabilities
) {
1962 ifnet_serialize_all(ifp
);
1963 ifp
->if_ioctl(ifp
, cmd
, data
, cred
);
1964 ifnet_deserialize_all(ifp
);
1968 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
1971 error
= copyinstr(ifr
->ifr_data
, new_name
, IFNAMSIZ
, NULL
);
1974 if (new_name
[0] == '\0') {
1978 if (ifunit(new_name
) != NULL
) {
1983 EVENTHANDLER_INVOKE(ifnet_detach_event
, ifp
);
1985 /* Announce the departure of the interface. */
1986 rt_ifannouncemsg(ifp
, IFAN_DEPARTURE
);
1988 strlcpy(ifp
->if_xname
, new_name
, sizeof(ifp
->if_xname
));
1989 ifa
= TAILQ_FIRST(&ifp
->if_addrheads
[mycpuid
])->ifa
;
1990 sdl
= (struct sockaddr_dl
*)ifa
->ifa_addr
;
1991 namelen
= strlen(new_name
);
1992 onamelen
= sdl
->sdl_nlen
;
1994 * Move the address if needed. This is safe because we
1995 * allocate space for a name of length IFNAMSIZ when we
1996 * create this in if_attach().
1998 if (namelen
!= onamelen
) {
1999 bcopy(sdl
->sdl_data
+ onamelen
,
2000 sdl
->sdl_data
+ namelen
, sdl
->sdl_alen
);
2002 bcopy(new_name
, sdl
->sdl_data
, namelen
);
2003 sdl
->sdl_nlen
= namelen
;
2004 sdl
= (struct sockaddr_dl
*)ifa
->ifa_netmask
;
2005 bzero(sdl
->sdl_data
, onamelen
);
2006 while (namelen
!= 0)
2007 sdl
->sdl_data
[--namelen
] = 0xff;
2009 EVENTHANDLER_INVOKE(ifnet_attach_event
, ifp
);
2011 /* Announce the return of the interface. */
2012 rt_ifannouncemsg(ifp
, IFAN_ARRIVAL
);
2016 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
2019 ifp
->if_metric
= ifr
->ifr_metric
;
2020 getmicrotime(&ifp
->if_lastchange
);
2024 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
2027 if (ifp
->if_ioctl
== NULL
) {
2031 ifnet_serialize_all(ifp
);
2032 error
= ifp
->if_ioctl(ifp
, cmd
, data
, cred
);
2033 ifnet_deserialize_all(ifp
);
2035 getmicrotime(&ifp
->if_lastchange
);
2040 u_long oldmtu
= ifp
->if_mtu
;
2042 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
2045 if (ifp
->if_ioctl
== NULL
) {
2049 if (ifr
->ifr_mtu
< IF_MINMTU
|| ifr
->ifr_mtu
> IF_MAXMTU
) {
2053 ifnet_serialize_all(ifp
);
2054 error
= ifp
->if_ioctl(ifp
, cmd
, data
, cred
);
2055 ifnet_deserialize_all(ifp
);
2057 getmicrotime(&ifp
->if_lastchange
);
2061 * If the link MTU changed, do network layer specific procedure.
2063 if (ifp
->if_mtu
!= oldmtu
) {
2072 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
2076 /* XXX need driver supplied upper limit */
2077 if (ifr
->ifr_tsolen
<= 0) {
2081 ifp
->if_tsolen
= ifr
->ifr_tsolen
;
2086 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
2090 /* Don't allow group membership on non-multicast interfaces. */
2091 if ((ifp
->if_flags
& IFF_MULTICAST
) == 0) {
2096 /* Don't let users screw up protocols' entries. */
2097 if (ifr
->ifr_addr
.sa_family
!= AF_LINK
) {
2102 if (cmd
== SIOCADDMULTI
) {
2103 struct ifmultiaddr
*ifma
;
2104 error
= if_addmulti(ifp
, &ifr
->ifr_addr
, &ifma
);
2106 error
= if_delmulti(ifp
, &ifr
->ifr_addr
);
2109 getmicrotime(&ifp
->if_lastchange
);
2112 case SIOCSIFPHYADDR
:
2113 case SIOCDIFPHYADDR
:
2115 case SIOCSIFPHYADDR_IN6
:
2117 case SIOCSLIFPHYADDR
:
2119 case SIOCSIFGENERIC
:
2120 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
2123 if (ifp
->if_ioctl
== 0) {
2127 ifnet_serialize_all(ifp
);
2128 error
= ifp
->if_ioctl(ifp
, cmd
, data
, cred
);
2129 ifnet_deserialize_all(ifp
);
2131 getmicrotime(&ifp
->if_lastchange
);
2135 ifs
= (struct ifstat
*)data
;
2136 ifs
->ascii
[0] = '\0';
2138 case SIOCGIFPSRCADDR
:
2139 case SIOCGIFPDSTADDR
:
2140 case SIOCGLIFPHYADDR
:
2142 case SIOCGIFGENERIC
:
2143 if (ifp
->if_ioctl
== NULL
) {
2147 ifnet_serialize_all(ifp
);
2148 error
= ifp
->if_ioctl(ifp
, cmd
, data
, cred
);
2149 ifnet_deserialize_all(ifp
);
2153 error
= priv_check_cred(cred
, PRIV_ROOT
, 0);
2156 error
= if_setlladdr(ifp
, ifr
->ifr_addr
.sa_data
,
2157 ifr
->ifr_addr
.sa_len
);
2158 EVENTHANDLER_INVOKE(iflladdr_event
, ifp
);
2162 oif_flags
= ifp
->if_flags
;
2163 if (so
->so_proto
== 0) {
2167 error
= so_pru_control_direct(so
, cmd
, data
, ifp
);
2169 if ((oif_flags
^ ifp
->if_flags
) & IFF_UP
) {
2171 DELAY(100);/* XXX: temporary workaround for fxp issue*/
2172 if (ifp
->if_flags
& IFF_UP
) {
2187 * Set/clear promiscuous mode on interface ifp based on the truth value
2188 * of pswitch. The calls are reference counted so that only the first
2189 * "on" request actually has an effect, as does the final "off" request.
2190 * Results are undefined if the "off" and "on" requests are not matched.
2193 ifpromisc(struct ifnet
*ifp
, int pswitch
)
2199 oldflags
= ifp
->if_flags
;
2200 if (ifp
->if_flags
& IFF_PPROMISC
) {
2201 /* Do nothing if device is in permanently promiscuous mode */
2202 ifp
->if_pcount
+= pswitch
? 1 : -1;
2207 * If the device is not configured up, we cannot put it in
2210 if ((ifp
->if_flags
& IFF_UP
) == 0)
2212 if (ifp
->if_pcount
++ != 0)
2214 ifp
->if_flags
|= IFF_PROMISC
;
2215 log(LOG_INFO
, "%s: promiscuous mode enabled\n",
2218 if (--ifp
->if_pcount
> 0)
2220 ifp
->if_flags
&= ~IFF_PROMISC
;
2221 log(LOG_INFO
, "%s: promiscuous mode disabled\n",
2224 ifr
.ifr_flags
= ifp
->if_flags
;
2225 ifr
.ifr_flagshigh
= ifp
->if_flags
>> 16;
2226 ifnet_serialize_all(ifp
);
2227 error
= ifp
->if_ioctl(ifp
, SIOCSIFFLAGS
, (caddr_t
)&ifr
, NULL
);
2228 ifnet_deserialize_all(ifp
);
2232 ifp
->if_flags
= oldflags
;
2237 * Return interface configuration
2238 * of system. List may be used
2239 * in later ioctl's (above) to get
2240 * other information.
2243 ifconf(u_long cmd
, caddr_t data
, struct ucred
*cred
)
2245 struct ifconf
*ifc
= (struct ifconf
*)data
;
2247 struct sockaddr
*sa
;
2248 struct ifreq ifr
, *ifrp
;
2249 int space
= ifc
->ifc_len
, error
= 0;
2251 ifrp
= ifc
->ifc_req
;
2254 TAILQ_FOREACH(ifp
, &ifnetlist
, if_link
) {
2255 struct ifaddr_container
*ifac
, *ifac_mark
;
2256 struct ifaddr_marker mark
;
2257 struct ifaddrhead
*head
;
2260 if (space
<= sizeof ifr
)
2264 * Zero the stack declared structure first to prevent
2265 * memory disclosure.
2267 bzero(&ifr
, sizeof(ifr
));
2268 if (strlcpy(ifr
.ifr_name
, ifp
->if_xname
, sizeof(ifr
.ifr_name
))
2269 >= sizeof(ifr
.ifr_name
)) {
2270 error
= ENAMETOOLONG
;
2275 * Add a marker, since copyout() could block and during that
2276 * period the list could be changed. Inserting the marker to
2277 * the header of the list will not cause trouble for the code
2278 * assuming that the first element of the list is AF_LINK; the
2279 * marker will be moved to the next position w/o blocking.
2281 ifa_marker_init(&mark
, ifp
);
2282 ifac_mark
= &mark
.ifac
;
2283 head
= &ifp
->if_addrheads
[mycpuid
];
2286 TAILQ_INSERT_HEAD(head
, ifac_mark
, ifa_link
);
2287 while ((ifac
= TAILQ_NEXT(ifac_mark
, ifa_link
)) != NULL
) {
2288 struct ifaddr
*ifa
= ifac
->ifa
;
2290 TAILQ_REMOVE(head
, ifac_mark
, ifa_link
);
2291 TAILQ_INSERT_AFTER(head
, ifac
, ifac_mark
, ifa_link
);
2294 if (ifa
->ifa_addr
->sa_family
== AF_UNSPEC
)
2297 if (space
<= sizeof ifr
)
2300 if (cred
->cr_prison
&&
2301 prison_if(cred
, sa
))
2305 * Keep a reference on this ifaddr, so that it will
2306 * not be destroyed when its address is copied to
2307 * the userland, which could block.
2310 if (sa
->sa_len
<= sizeof(*sa
)) {
2312 error
= copyout(&ifr
, ifrp
, sizeof ifr
);
2315 if (space
< (sizeof ifr
) + sa
->sa_len
-
2320 space
-= sa
->sa_len
- sizeof(*sa
);
2321 error
= copyout(&ifr
, ifrp
,
2322 sizeof ifr
.ifr_name
);
2324 error
= copyout(sa
, &ifrp
->ifr_addr
,
2326 ifrp
= (struct ifreq
*)
2327 (sa
->sa_len
+ (caddr_t
)&ifrp
->ifr_addr
);
2332 space
-= sizeof ifr
;
2334 TAILQ_REMOVE(head
, ifac_mark
, ifa_link
);
2338 bzero(&ifr
.ifr_addr
, sizeof ifr
.ifr_addr
);
2339 error
= copyout(&ifr
, ifrp
, sizeof ifr
);
2342 space
-= sizeof ifr
;
2348 ifc
->ifc_len
-= space
;
2353 * Just like if_promisc(), but for all-multicast-reception mode.
2356 if_allmulti(struct ifnet
*ifp
, int onswitch
)
2364 if (ifp
->if_amcount
++ == 0) {
2365 ifp
->if_flags
|= IFF_ALLMULTI
;
2366 ifr
.ifr_flags
= ifp
->if_flags
;
2367 ifr
.ifr_flagshigh
= ifp
->if_flags
>> 16;
2368 ifnet_serialize_all(ifp
);
2369 error
= ifp
->if_ioctl(ifp
, SIOCSIFFLAGS
, (caddr_t
)&ifr
,
2371 ifnet_deserialize_all(ifp
);
2374 if (ifp
->if_amcount
> 1) {
2377 ifp
->if_amcount
= 0;
2378 ifp
->if_flags
&= ~IFF_ALLMULTI
;
2379 ifr
.ifr_flags
= ifp
->if_flags
;
2380 ifr
.ifr_flagshigh
= ifp
->if_flags
>> 16;
2381 ifnet_serialize_all(ifp
);
2382 error
= ifp
->if_ioctl(ifp
, SIOCSIFFLAGS
, (caddr_t
)&ifr
,
2384 ifnet_deserialize_all(ifp
);
2396 * Add a multicast listenership to the interface in question.
2397 * The link layer provides a routine which converts
2400 if_addmulti_serialized(struct ifnet
*ifp
, struct sockaddr
*sa
,
2401 struct ifmultiaddr
**retifma
)
2403 struct sockaddr
*llsa
, *dupsa
;
2405 struct ifmultiaddr
*ifma
;
2407 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
2410 * If the matching multicast address already exists
2411 * then don't add a new one, just add a reference
2413 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
2414 if (sa_equal(sa
, ifma
->ifma_addr
)) {
2415 ifma
->ifma_refcount
++;
2423 * Give the link layer a chance to accept/reject it, and also
2424 * find out which AF_LINK address this maps to, if it isn't one
2427 if (ifp
->if_resolvemulti
) {
2428 error
= ifp
->if_resolvemulti(ifp
, &llsa
, sa
);
2435 ifma
= kmalloc(sizeof *ifma
, M_IFMADDR
, M_INTWAIT
);
2436 dupsa
= kmalloc(sa
->sa_len
, M_IFMADDR
, M_INTWAIT
);
2437 bcopy(sa
, dupsa
, sa
->sa_len
);
2439 ifma
->ifma_addr
= dupsa
;
2440 ifma
->ifma_lladdr
= llsa
;
2441 ifma
->ifma_ifp
= ifp
;
2442 ifma
->ifma_refcount
= 1;
2443 ifma
->ifma_protospec
= NULL
;
2444 rt_newmaddrmsg(RTM_NEWMADDR
, ifma
);
2446 TAILQ_INSERT_HEAD(&ifp
->if_multiaddrs
, ifma
, ifma_link
);
2451 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
2452 if (sa_equal(ifma
->ifma_addr
, llsa
))
2456 ifma
->ifma_refcount
++;
2458 ifma
= kmalloc(sizeof *ifma
, M_IFMADDR
, M_INTWAIT
);
2459 dupsa
= kmalloc(llsa
->sa_len
, M_IFMADDR
, M_INTWAIT
);
2460 bcopy(llsa
, dupsa
, llsa
->sa_len
);
2461 ifma
->ifma_addr
= dupsa
;
2462 ifma
->ifma_ifp
= ifp
;
2463 ifma
->ifma_refcount
= 1;
2464 TAILQ_INSERT_HEAD(&ifp
->if_multiaddrs
, ifma
, ifma_link
);
2468 * We are certain we have added something, so call down to the
2469 * interface to let them know about it.
2472 ifp
->if_ioctl(ifp
, SIOCADDMULTI
, 0, NULL
);
2478 if_addmulti(struct ifnet
*ifp
, struct sockaddr
*sa
,
2479 struct ifmultiaddr
**retifma
)
2483 ifnet_serialize_all(ifp
);
2484 error
= if_addmulti_serialized(ifp
, sa
, retifma
);
2485 ifnet_deserialize_all(ifp
);
/*
 * Remove a reference to a multicast address on this interface.  Yell
 * if the request does not match an existing membership.
 */
static int
if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return ENOENT;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	rt_newmaddrmsg(RTM_DELMADDR, ifma);
	sa = ifma->ifma_lladdr;
	TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
	/*
	 * Make sure the interface driver is notified
	 * in the case of a link layer mcast group being left.
	 */
	if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL)
		ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(ifma, M_IFMADDR);
	if (sa == NULL)
		return 0;

	/*
	 * Now look for the link-layer address which corresponds to
	 * this network address.  It had been squirreled away in
	 * ifma->ifma_lladdr for this purpose (so we don't have
	 * to call ifp->if_resolvemulti() again), and we saved that
	 * value in sa above.  If some nasty deleted the
	 * link-layer address out from underneath us, we can deal because
	 * the address we stored is not the same as the one which was
	 * in the record for the link-layer address.  (So we don't complain
	 * in that case.)
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return 0;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
	ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(sa, M_IFMADDR);
	kfree(ifma, M_IFMADDR);

	return 0;
}

int
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
{
	int error;

	ifnet_serialize_all(ifp);
	error = if_delmulti_serialized(ifp, sa);
	ifnet_deserialize_all(ifp);

	return error;
}

/*
 * Delete all multicast group membership for an interface.
 * Should be used to quickly flush all multicast filters.
 */
void
if_delallmulti_serialized(struct ifnet *ifp)
{
	struct ifmultiaddr *ifma, mark;
	struct sockaddr sa;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	bzero(&sa, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	sa.sa_len = sizeof(sa);

	bzero(&mark, sizeof(mark));
	mark.ifma_addr = &sa;

	TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link);
	while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) {
		TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link);
		TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark,
		    ifma_link);

		/* Skip other markers (AF_UNSPEC placeholder addresses). */
		if (ifma->ifma_addr->sa_family == AF_UNSPEC)
			continue;

		if_delmulti_serialized(ifp, ifma->ifma_addr);
	}
	TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link);
}

/*
 * Set the link layer address on an interface.
 *
 * At this time we only support certain types of interfaces,
 * and we don't allow the length of the address to change.
 */
int
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
	struct sockaddr_dl *sdl;
	struct ifreq ifr;

	sdl = IF_LLSOCKADDR(ifp);
	if (sdl == NULL)
		return (EINVAL);
	if (len != sdl->sdl_alen)	/* don't allow length to change */
		return (EINVAL);
	switch (ifp->if_type) {
	case IFT_ETHER:			/* these types use struct arpcom */
	case IFT_IEEE8023ADLAG:
		bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
		bcopy(lladdr, LLADDR(sdl), len);
		break;
	default:
		return (ENODEV);
	}
	/*
	 * If the interface is already up, we need
	 * to re-init it in order to reprogram its
	 * address filter.
	 */
	ifnet_serialize_all(ifp);
	if ((ifp->if_flags & IFF_UP) != 0) {
#ifdef INET
		struct ifaddr_container *ifac;
#endif

		ifp->if_flags &= ~IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
		    NULL);
		ifp->if_flags |= IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
		    NULL);
#ifdef INET
		/*
		 * Also send gratuitous ARPs to notify other nodes about
		 * the address change.
		 */
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr != NULL &&
			    ifa->ifa_addr->sa_family == AF_INET)
				arp_gratuitous(ifp, ifa);
		}
#endif
	}
	ifnet_deserialize_all(ifp);

	return (0);
}

struct ifmultiaddr *
ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
{
	struct ifmultiaddr *ifma;

	/* TODO: need ifnet_serialize_main */
	ifnet_serialize_all(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(ifma->ifma_addr, sa))
			break;
	ifnet_deserialize_all(ifp);

	return ifma;
}

/*
 * This function locates the first real ethernet MAC from a network
 * card and loads it into node, returning 0 on success or ENOENT if
 * no suitable interfaces were found.  It is used by the uuid code to
 * generate a unique 6-byte number.
 */
int
if_getanyethermac(uint16_t *node, int minlen)
{
	struct ifnet *ifp;
	struct sockaddr_dl *sdl;

	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		if (ifp->if_type != IFT_ETHER)
			continue;
		sdl = IF_LLSOCKADDR(ifp);
		if (sdl->sdl_alen < minlen)
			continue;
		bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
		    minlen);
		ifnet_unlock();
		return (0);
	}
	ifnet_unlock();

	return (ENOENT);
}

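/*
 * Usage sketch (mirrors the uuid code; the buffer name is illustrative):
 *
 *	uint16_t node[3];	(6 bytes of MAC)
 *
 *	if (if_getanyethermac(node, sizeof(node)) == ENOENT)
 *		... no suitable NIC, fall back to a random node value
 */
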
/*
 * The name argument must be a pointer to storage which will last as
 * long as the interface does.  For physical devices, the result of
 * device_get_name(dev) is a good choice and for pseudo-devices a
 * static string works well.
 */
void
if_initname(struct ifnet *ifp, const char *name, int unit)
{
	ifp->if_dname = name;
	ifp->if_dunit = unit;
	if (unit != IF_DUNIT_NONE)
		ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
	else
		strlcpy(ifp->if_xname, name, IFNAMSIZ);
}

int
if_printf(struct ifnet *ifp, const char *fmt, ...)
{
	__va_list ap;
	int retval;

	retval = kprintf("%s: ", ifp->if_xname);
	__va_start(ap, fmt);
	retval += kvprintf(fmt, ap);
	__va_end(ap);

	return (retval);
}

struct ifnet *
if_alloc(uint8_t type)
{
	struct ifnet *ifp;
	size_t size;

	/*
	 * XXX temporary hack until arpcom is setup in if_l2com
	 */
	if (type == IFT_ETHER)
		size = sizeof(struct arpcom);
	else
		size = sizeof(struct ifnet);

	ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO);

	ifp->if_type = type;

	if (if_com_alloc[type] != NULL) {
		ifp->if_l2com = if_com_alloc[type](type, ifp);
		if (ifp->if_l2com == NULL) {
			kfree(ifp, M_IFNET);
			return (NULL);
		}
	}

	return (ifp);
}

void
if_free(struct ifnet *ifp)
{
	kfree(ifp, M_IFNET);
}

void
ifq_set_classic(struct ifaltq *ifq)
{
	ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq,
	    ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request);
}

void
ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq,
    ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request)
{
	int q;

	KASSERT(mapsubq != NULL, ("mapsubq is not specified"));
	KASSERT(enqueue != NULL, ("enqueue is not specified"));
	KASSERT(dequeue != NULL, ("dequeue is not specified"));
	KASSERT(request != NULL, ("request is not specified"));

	ifq->altq_mapsubq = mapsubq;
	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];

		ifsq->ifsq_enqueue = enqueue;
		ifsq->ifsq_dequeue = dequeue;
		ifsq->ifsq_request = request;
	}
}

static void
ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m)
{
	classq_add(&ifsq->ifsq_norm, m);
	ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len);
}

static void
ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m)
{
	classq_add(&ifsq->ifsq_prio, m);
	ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len);
	ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len);
}

static struct mbuf *
ifsq_norm_dequeue(struct ifaltq_subque *ifsq)
{
	struct mbuf *m;

	m = classq_get(&ifsq->ifsq_norm);
	if (m != NULL)
		ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len);
	return (m);
}

static struct mbuf *
ifsq_prio_dequeue(struct ifaltq_subque *ifsq)
{
	struct mbuf *m;

	m = classq_get(&ifsq->ifsq_prio);
	if (m != NULL) {
		ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len);
		ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len);
	}
	return (m);
}

int
ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pa __unused)
{
	M_ASSERTPKTHDR(m);
again:
	if (ifsq->ifsq_len >= ifsq->ifsq_maxlen ||
	    ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) {
		struct mbuf *m_drop;

		if (m->m_flags & M_PRIO) {
			m_drop = NULL;
			if (ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen >> 1) &&
			    ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt >> 1)) {
				/* Try dropping some from normal queue. */
				m_drop = ifsq_norm_dequeue(ifsq);
			}
			if (m_drop == NULL)
				m_drop = ifsq_prio_dequeue(ifsq);
		} else {
			m_drop = ifsq_norm_dequeue(ifsq);
		}
		if (m_drop != NULL) {
			IFNET_STAT_INC(ifsq->ifsq_ifp, oqdrops, 1);
			m_freem(m_drop);
			goto again;
		}
		/*
		 * No old packets could be dropped!
		 * NOTE: Caller increases oqdrops.
		 */
		m_freem(m);
		return (ENOBUFS);
	}

	if (m->m_flags & M_PRIO)
		ifsq_prio_enqueue(ifsq, m);
	else
		ifsq_norm_enqueue(ifsq, m);
	return (0);
}

struct mbuf *
ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct mbuf *m;

	switch (op) {
	case ALTDQ_POLL:
		m = classq_head(&ifsq->ifsq_prio);
		if (m == NULL)
			m = classq_head(&ifsq->ifsq_norm);
		break;

	case ALTDQ_REMOVE:
		m = ifsq_prio_dequeue(ifsq);
		if (m == NULL)
			m = ifsq_norm_dequeue(ifsq);
		break;

	default:
		panic("unsupported ALTQ dequeue op: %d", op);
	}
	return (m);
}

int
ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	switch (req) {
	case ALTRQ_PURGE:
		for (;;) {
			struct mbuf *m;

			m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE);
			if (m == NULL)
				break;
			m_freem(m);
		}
		break;

	default:
		panic("unsupported ALTQ request: %d", req);
	}
	return (0);
}

static void
ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	int running = 0, need_sched;

	/*
	 * Try to do direct ifnet.if_start on the subqueue first.  If there is
	 * contention on the subqueue hardware serializer, ifnet.if_start on
	 * the subqueue will be scheduled on the subqueue owner CPU.
	 */
	if (!ifsq_tryserialize_hw(ifsq)) {
		/*
		 * Subqueue hardware serializer contention happened,
		 * ifnet.if_start on the subqueue is scheduled on
		 * the subqueue owner CPU, and we keep going.
		 */
		ifsq_ifstart_schedule(ifsq, 1);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
		ifp->if_start(ifp, ifsq);
		if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
			running = 1;
	}
	need_sched = ifsq_ifstart_need_schedule(ifsq, running);

	ifsq_deserialize_hw(ifsq);

	if (need_sched) {
		/*
		 * More data need to be transmitted, ifnet.if_start on the
		 * subqueue is scheduled on the subqueue owner CPU, and we
		 * keep going.
		 * NOTE: ifnet.if_start subqueue interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, force_sched);
	}
}

/*
 * Subqueue packets staging mechanism:
 *
 * The packets enqueued into the subqueue are staged to a certain amount
 * before the ifnet.if_start on the subqueue is called.  In this way, the
 * driver could avoid writing to hardware registers upon every packet;
 * instead, hardware registers could be written when a certain amount of
 * packets are put onto the hardware TX ring.  The measurement on several
 * modern NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) shows that the
 * hardware register writing aggregation could save ~20% CPU time when
 * 18-byte UDP datagrams are transmitted at 1.48Mpps.  The performance
 * improvement from hardware register writing aggregation is also mentioned
 * by Luigi Rizzo's netmap paper (http://info.iet.unipi.it/~luigi/netmap/).
 *
 * Subqueue packets staging is performed for two entry points into drivers'
 * transmission function:
 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try()
 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule()
 *
 * Subqueue packets staging will be stopped upon any of the following
 * conditions:
 * - If the count of packets enqueued on the current CPU is greater than or
 *   equal to ifsq_stage_cntmax. (XXX this should be per-interface)
 * - If the total length of packets enqueued on the current CPU is greater
 *   than or equal to the hardware's MTU - max_protohdr.  max_protohdr is
 *   cut from the hardware's MTU mainly because a full TCP segment's size
 *   is usually less than the hardware's MTU.
 * - ifsq_ifstart_schedule() is not pending on the current CPU and the
 *   ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not
 *   released.
 * - The if_start_rollup(), which is registered as a low priority netisr
 *   rollup function, is called; probably because no more work is pending
 *   for netisr.
 *
 * Currently subqueue packet staging is only performed in netisr threads.
 */
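
/*
 * Illustration of the length condition above (the numbers are examples
 * only, not defaults taken from this file): with a 1500-byte MTU and a
 * 60-byte max_protohdr, staging on a CPU stops and ifnet.if_start is
 * triggered once the staged bytes reach 1500 - 60 = 1440, even if fewer
 * than ifsq_stage_cntmax packets have been staged so far.
 */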
int
ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
{
	struct ifaltq *ifq = &ifp->if_snd;
	struct ifaltq_subque *ifsq;
	int error, start = 0, len, mcast = 0, avoid_start = 0;
	struct ifsubq_stage_head *head = NULL;
	struct ifsubq_stage *stage = NULL;
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;

	crit_enter_quick(td);

	ifsq = ifq_map_subq(ifq, gd->gd_cpuid);
	ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq);

	len = m->m_pkthdr.len;
	if (m->m_flags & M_MCAST)
		mcast = 1;

	if (td->td_type == TD_TYPE_NETISR) {
		head = &ifsubq_stage_heads[mycpuid];
		stage = ifsq_get_stage(ifsq, mycpuid);

		stage->stg_cnt++;
		stage->stg_len += len;
		if (stage->stg_cnt < ifsq_stage_cntmax &&
		    stage->stg_len < (ifp->if_mtu - max_protohdr))
			avoid_start = 1;
	}

	ALTQ_SQ_LOCK(ifsq);
	error = ifsq_enqueue_locked(ifsq, m, pa);
	if (error) {
		IFNET_STAT_INC(ifp, oqdrops, 1);
		if (!ifsq_data_ready(ifsq)) {
			ALTQ_SQ_UNLOCK(ifsq);
			crit_exit_quick(td);
			return error;
		}
		avoid_start = 0;
	}
	if (!ifsq_is_started(ifsq)) {
		if (avoid_start) {
			ALTQ_SQ_UNLOCK(ifsq);

			KKASSERT(!error);
			if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
				ifsq_stage_insert(head, stage);

			IFNET_STAT_INC(ifp, obytes, len);
			if (mcast)
				IFNET_STAT_INC(ifp, omcasts, 1);
			crit_exit_quick(td);
			return error;
		}

		/*
		 * Hold the subqueue interlock of ifnet.if_start
		 */
		ifsq_set_started(ifsq);
		start = 1;
	}
	ALTQ_SQ_UNLOCK(ifsq);

	if (!error) {
		IFNET_STAT_INC(ifp, obytes, len);
		if (mcast)
			IFNET_STAT_INC(ifp, omcasts, 1);
	}

	if (stage != NULL) {
		if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) {
			KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
			if (!avoid_start) {
				ifsq_stage_remove(head, stage);
				ifsq_ifstart_schedule(ifsq, 1);
			}
			crit_exit_quick(td);
			return error;
		}

		if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) {
			ifsq_stage_remove(head, stage);
		} else {
			stage->stg_cnt = 0;
			stage->stg_len = 0;
		}
	}

	if (!start) {
		crit_exit_quick(td);
		return error;
	}

	ifsq_ifstart_try(ifsq, 0);

	crit_exit_quick(td);
	return error;
}

void *
ifa_create(int size)
{
	struct ifaddr *ifa;
	int i;

	KASSERT(size >= sizeof(*ifa), ("ifaddr size too small"));

	ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO);

	/*
	 * Make ifa_container available on all CPUs, since they
	 * could be accessed by any threads.
	 */
	ifa->ifa_containers =
	    kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container),
	        M_IFADDR, M_INTWAIT | M_ZERO);

	ifa->ifa_ncnt = ncpus;
	for (i = 0; i < ncpus; ++i) {
		struct ifaddr_container *ifac = &ifa->ifa_containers[i];

		ifac->ifa_magic = IFA_CONTAINER_MAGIC;
		ifac->ifa = ifa;
		ifac->ifa_refcnt = 1;
	}
#ifdef IFADDR_DEBUG
	kprintf("alloc ifa %p %d\n", ifa, size);
#endif
	return ifa;
}

void
ifac_free(struct ifaddr_container *ifac, int cpu_id)
{
	struct ifaddr *ifa = ifac->ifa;

	KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
	KKASSERT(ifac->ifa_refcnt == 0);
	KASSERT(ifac->ifa_listmask == 0,
	    ("ifa is still on %#x lists", ifac->ifa_listmask));

	ifac->ifa_magic = IFA_CONTAINER_DEAD;

#ifdef IFADDR_DEBUG_VERBOSE
	kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
#endif

	KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
	    ("invalid # of ifac, %d", ifa->ifa_ncnt));
	if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
#ifdef IFADDR_DEBUG
		kprintf("free ifa %p\n", ifa);
#endif
		kfree(ifa->ifa_containers, M_IFADDR);
		kfree(ifa, M_IFADDR);
	}
}

static void
ifa_iflink_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
	struct ifaddr *ifa = msg->ifa;
	struct ifnet *ifp = msg->ifp;
	int cpu = mycpuid;
	struct ifaddr_container *ifac;

	crit_enter();

	ifac = &ifa->ifa_containers[cpu];
	ASSERT_IFAC_VALID(ifac);
	KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
	    ("ifaddr is on if_addrheads"));

	ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
	if (msg->tail)
		TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
	else
		TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);

	crit_exit();

	netisr_forwardmsg_all(&nmsg->base, cpu + 1);
}

void
ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, ifa_iflink_dispatch);
	msg.ifa = ifa;
	msg.ifp = ifp;
	msg.tail = tail;

	netisr_domsg(&msg.base, 0);
}

static void
ifa_ifunlink_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
	struct ifaddr *ifa = msg->ifa;
	struct ifnet *ifp = msg->ifp;
	int cpu = mycpuid;
	struct ifaddr_container *ifac;

	crit_enter();

	ifac = &ifa->ifa_containers[cpu];
	ASSERT_IFAC_VALID(ifac);
	KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
	    ("ifaddr is not on if_addrhead"));

	TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
	ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;

	crit_exit();

	netisr_forwardmsg_all(&nmsg->base, cpu + 1);
}

void
ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, ifa_ifunlink_dispatch);
	msg.ifa = ifa;
	msg.ifp = ifp;

	netisr_domsg(&msg.base, 0);
}

static void
ifa_destroy_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;

	IFAFREE(msg->ifa);
	netisr_forwardmsg_all(&nmsg->base, mycpuid + 1);
}

void
ifa_destroy(struct ifaddr *ifa)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, ifa_destroy_dispatch);
	msg.ifa = ifa;

	netisr_domsg(&msg.base, 0);
}

static void
if_start_rollup(void)
{
	struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid];
	struct ifsubq_stage *stage;

	crit_enter();

	while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) {
		struct ifaltq_subque *ifsq = stage->stg_subq;
		int is_sched = 0;

		if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)
			is_sched = 1;
		ifsq_stage_remove(head, stage);

		if (is_sched) {
			ifsq_ifstart_schedule(ifsq, 1);
		} else {
			int start = 0;

			ALTQ_SQ_LOCK(ifsq);
			if (!ifsq_is_started(ifsq)) {
				/*
				 * Hold the subqueue interlock of
				 * ifnet.if_start
				 */
				ifsq_set_started(ifsq);
				start = 1;
			}
			ALTQ_SQ_UNLOCK(ifsq);

			if (start)
				ifsq_ifstart_try(ifsq, 1);
		}
		KKASSERT((stage->stg_flags &
		    (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
	}

	crit_exit();
}

static void
ifnetinit(void *dummy __unused)
{
	int i;

	/* XXX netisr_ncpus */
	for (i = 0; i < ncpus; ++i)
		TAILQ_INIT(&ifsubq_stage_heads[i].stg_head);
	netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART);
}

void
if_register_com_alloc(u_char type,
    if_com_alloc_t *a, if_com_free_t *f)
{
	KASSERT(if_com_alloc[type] == NULL,
	    ("if_register_com_alloc: %d already registered", type));
	KASSERT(if_com_free[type] == NULL,
	    ("if_register_com_alloc: %d free already registered", type));

	if_com_alloc[type] = a;
	if_com_free[type] = f;
}

void
if_deregister_com_alloc(u_char type)
{
	KASSERT(if_com_alloc[type] != NULL,
	    ("if_deregister_com_alloc: %d not registered", type));
	KASSERT(if_com_free[type] != NULL,
	    ("if_deregister_com_alloc: %d free not registered", type));
	if_com_alloc[type] = NULL;
	if_com_free[type] = NULL;
}

void
ifq_set_maxlen(struct ifaltq *ifq, int len)
{
	ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax);
}

int
ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused)
{
	return ALTQ_SUBQ_INDEX_DEFAULT;
}

int
ifq_mapsubq_modulo(struct ifaltq *ifq, int cpuid)
{
	return (cpuid % ifq->altq_subq_mappriv);
}

static void
ifsq_watchdog(void *arg)
{
	struct ifsubq_watchdog *wd = arg;
	struct ifnet *ifp;

	if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer))
		goto done;

	ifp = ifsq_get_ifp(wd->wd_subq);
	if (ifnet_tryserialize_all(ifp)) {
		wd->wd_watchdog(wd->wd_subq);
		ifnet_deserialize_all(ifp);
	} else {
		/* try again next timeout */
		wd->wd_timer = 1;
	}
done:
	ifsq_watchdog_reset(wd);
}

static void
ifsq_watchdog_reset(struct ifsubq_watchdog *wd)
{
	callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd,
	    ifsq_get_cpuid(wd->wd_subq));
}

void
ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq,
    ifsq_watchdog_t watchdog)
{
	callout_init_mp(&wd->wd_callout);
	wd->wd_timer = 0;
	wd->wd_subq = ifsq;
	wd->wd_watchdog = watchdog;
}

void
ifsq_watchdog_start(struct ifsubq_watchdog *wd)
{
	wd->wd_timer = 0;
	ifsq_watchdog_reset(wd);
}

void
ifsq_watchdog_stop(struct ifsubq_watchdog *wd)
{
	wd->wd_timer = 0;
	callout_stop(&wd->wd_callout);
}

void
ifnet_lock(void)
{
	KASSERT(curthread->td_type != TD_TYPE_NETISR,
	    ("try holding ifnet lock in netisr"));
	mtx_lock(&ifnet_mtx);
}

void
ifnet_unlock(void)
{
	KASSERT(curthread->td_type != TD_TYPE_NETISR,
	    ("try holding ifnet lock in netisr"));
	mtx_unlock(&ifnet_mtx);
}

static struct ifnet_array *
ifnet_array_alloc(int count)
{
	struct ifnet_array *arr;

	arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]),
	    M_IFNET, M_WAITOK | M_ZERO);
	arr->ifnet_count = count;

	return arr;
}

static void
ifnet_array_free(struct ifnet_array *arr)
{
	if (arr == &ifnet_array0)
		return;
	kfree(arr, M_IFNET);
}

static struct ifnet_array *
ifnet_array_add(struct ifnet *ifp, const struct ifnet_array *old_arr)
{
	struct ifnet_array *arr;
	int count, i;

	KASSERT(old_arr->ifnet_count >= 0,
	    ("invalid ifnet array count %d", old_arr->ifnet_count));
	count = old_arr->ifnet_count + 1;
	arr = ifnet_array_alloc(count);

	/*
	 * Save the old ifnet array and append this ifp to the end of
	 * the new ifnet array.
	 */
	for (i = 0; i < old_arr->ifnet_count; ++i) {
		KASSERT(old_arr->ifnet_arr[i] != ifp,
		    ("%s is already in ifnet array", ifp->if_xname));
		arr->ifnet_arr[i] = old_arr->ifnet_arr[i];
	}
	KASSERT(i == count - 1,
	    ("add %s, ifnet array index mismatch, should be %d, but got %d",
	     ifp->if_xname, count - 1, i));
	arr->ifnet_arr[i] = ifp;

	return arr;
}

static struct ifnet_array *
ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr)
{
	struct ifnet_array *arr;
	int count, i, idx, found = 0;

	KASSERT(old_arr->ifnet_count > 0,
	    ("invalid ifnet array count %d", old_arr->ifnet_count));
	count = old_arr->ifnet_count - 1;
	arr = ifnet_array_alloc(count);

	/*
	 * Save the old ifnet array, but skip this ifp.
	 */
	idx = 0;
	for (i = 0; i < old_arr->ifnet_count; ++i) {
		if (old_arr->ifnet_arr[i] == ifp) {
			KASSERT(!found,
			    ("dup %s is in ifnet array", ifp->if_xname));
			found = 1;
			continue;
		}
		KASSERT(idx < count,
		    ("invalid ifnet array index %d, count %d", idx, count));
		arr->ifnet_arr[idx] = old_arr->ifnet_arr[i];
		++idx;
	}
	KASSERT(found, ("%s is not in ifnet array", ifp->if_xname));
	KASSERT(idx == count,
	    ("del %s, ifnet array count mismatch, should be %d, but got %d",
	     ifp->if_xname, count, idx));

	return arr;
}

const struct ifnet_array *
ifnet_array_get(void)
{
	const struct ifnet_array *ret;

	KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr"));
	ret = ifnet_array;
	/* Make sure 'ret' is really used. */
	cpu_ccfence();
	return (ret);
}

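/*
 * Usage sketch (must run in a netisr thread): snapshot the array and
 * iterate over it without holding ifnet_mtx:
 *
 *	const struct ifnet_array *arr = ifnet_array_get();
 *	int i;
 *
 *	for (i = 0; i < arr->ifnet_count; ++i) {
 *		struct ifnet *ifp = arr->ifnet_arr[i];
 *		...
 *	}
 */
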
int
ifnet_array_isempty(void)
{
	KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr"));
	if (ifnet_array->ifnet_count == 0)
		return 1;
	else
		return 0;
}

void
ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp)
{
	struct ifaddr *ifa;

	memset(mark, 0, sizeof(*mark));

	ifa = &mark->ifa;
	mark->ifac.ifa = ifa;

	ifa->ifa_addr = &mark->addr;
	ifa->ifa_dstaddr = &mark->dstaddr;
	ifa->ifa_netmask = &mark->netmask;
	ifa->ifa_ifp = ifp;
}

static int
if_ringcnt_fixup(int ring_cnt, int ring_cntmax)
{
	KASSERT(ring_cntmax > 0, ("invalid ring count max %d", ring_cntmax));

	if (ring_cnt <= 0 || ring_cnt > ring_cntmax)
		ring_cnt = ring_cntmax;
	if (ring_cnt > netisr_ncpus)
		ring_cnt = netisr_ncpus;
	return (ring_cnt);
}

static void
if_ringmap_set_grid(device_t dev, struct if_ringmap *rm, int grid)
{
	int i, offset;

	KASSERT(grid > 0, ("invalid if_ringmap grid %d", grid));
	KASSERT(grid >= rm->rm_cnt, ("invalid if_ringmap grid %d, count %d",
	    grid, rm->rm_cnt));
	rm->rm_grid = grid;

	offset = (rm->rm_grid * device_get_unit(dev)) % netisr_ncpus;
	for (i = 0; i < rm->rm_cnt; ++i) {
		rm->rm_cpumap[i] = offset + i;
		KASSERT(rm->rm_cpumap[i] < netisr_ncpus,
		    ("invalid cpumap[%d] = %d, offset %d", i,
		     rm->rm_cpumap[i], offset));
	}
}

static struct if_ringmap *
if_ringmap_alloc_flags(device_t dev, int ring_cnt, int ring_cntmax,
    uint32_t flags)
{
	struct if_ringmap *rm;
	int i, grid = 0, prev_grid;

	ring_cnt = if_ringcnt_fixup(ring_cnt, ring_cntmax);
	rm = kmalloc(__offsetof(struct if_ringmap, rm_cpumap[ring_cnt]),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	rm->rm_cnt = ring_cnt;
	if (flags & RINGMAP_FLAG_POWEROF2)
		rm->rm_cnt = 1 << (fls(rm->rm_cnt) - 1);

	prev_grid = netisr_ncpus;
	for (i = 0; i < netisr_ncpus; ++i) {
		if (netisr_ncpus % (i + 1) != 0)
			continue;

		grid = netisr_ncpus / (i + 1);
		if (rm->rm_cnt > grid) {
			grid = prev_grid;
			break;
		}

		if (rm->rm_cnt > netisr_ncpus / (i + 2))
			break;
		prev_grid = grid;
	}
	if_ringmap_set_grid(dev, rm, grid);

	return (rm);
}

struct if_ringmap *
if_ringmap_alloc(device_t dev, int ring_cnt, int ring_cntmax)
{
	return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax,
	    RINGMAP_FLAG_NONE));
}

struct if_ringmap *
if_ringmap_alloc2(device_t dev, int ring_cnt, int ring_cntmax)
{
	return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax,
	    RINGMAP_FLAG_POWEROF2));
}

void
if_ringmap_free(struct if_ringmap *rm)
{
	kfree(rm, M_DEVBUF);
}

/*
 * Align the two ringmaps.
 *
 * e.g. 8 netisrs, rm0 contains 4 rings, rm1 contains 2 rings.
 *
 * Before:
 *
 * CPU      0  1  2  3   4  5  6  7
 * NIC_RX               n0 n1 n2 n3
 * NIC_TX        N0 N1
 *
 * After:
 *
 * CPU      0  1  2  3   4  5  6  7
 * NIC_RX               n0 n1 n2 n3
 * NIC_TX               N0 N1
 */
static void
if_ringmap_align(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1)
{
	if (rm0->rm_grid > rm1->rm_grid)
		if_ringmap_set_grid(dev, rm1, rm0->rm_grid);
	else if (rm0->rm_grid < rm1->rm_grid)
		if_ringmap_set_grid(dev, rm0, rm1->rm_grid);
}

void
if_ringmap_match(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1)
{
	int subset_grid, cnt, divisor, mod, offset, i;
	struct if_ringmap *subset_rm, *rm;
	int old_rm0_grid, old_rm1_grid;

	if (rm0->rm_grid == rm1->rm_grid)
		return;

	/* Save grid for later use */
	old_rm0_grid = rm0->rm_grid;
	old_rm1_grid = rm1->rm_grid;

	if_ringmap_align(dev, rm0, rm1);

	/*
	 * Re-shuffle rings to get more even distribution.
	 *
	 * e.g. 12 netisrs, rm0 contains 4 rings, rm1 contains 2 rings.
	 *
	 * CPU       0  1  2  3   4  5  6  7   8  9 10 11
	 *
	 * NIC_RX   a0 a1 a2 a3  b0 b1 b2 b3  c0 c1 c2 c3
	 * NIC_TX   A0 A1        B0 B1        C0 C1
	 *
	 * NIC_RX   d0 d1 d2 d3  e0 e1 e2 e3  f0 f1 f2 f3
	 * NIC_TX         D0 D1        E0 E1        F0 F1
	 */
	if (rm0->rm_cnt >= (2 * old_rm1_grid)) {
		cnt = rm0->rm_cnt;
		subset_grid = old_rm1_grid;
		subset_rm = rm1;
		rm = rm0;
	} else if (rm1->rm_cnt > (2 * old_rm0_grid)) {
		cnt = rm1->rm_cnt;
		subset_grid = old_rm0_grid;
		subset_rm = rm0;
		rm = rm1;
	} else {
		/* No space to shuffle. */
		return;
	}

	mod = cnt / subset_grid;
	divisor = netisr_ncpus / rm->rm_grid;
	offset = ((device_get_unit(dev) / divisor) % mod) * subset_grid;

	for (i = 0; i < subset_rm->rm_cnt; ++i) {
		subset_rm->rm_cpumap[i] += offset;
		KASSERT(subset_rm->rm_cpumap[i] < netisr_ncpus,
		    ("match: invalid cpumap[%d] = %d, offset %d",
		     i, subset_rm->rm_cpumap[i], offset));
	}
#ifdef INVARIANTS
	for (i = 0; i < subset_rm->rm_cnt; ++i) {
		int j;

		for (j = 0; j < rm->rm_cnt; ++j) {
			if (rm->rm_cpumap[j] == subset_rm->rm_cpumap[i])
				break;
		}
		KASSERT(j < rm->rm_cnt,
		    ("subset cpumap[%d] = %d not found in superset",
		     i, subset_rm->rm_cpumap[i]));
	}
#endif
}

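/*
 * Usage sketch (hypothetical driver attach; softc field names and ring
 * counts are illustrative): allocate RX/TX ringmaps, then match them so
 * the TX rings are mapped onto a subset of the RX CPUs:
 *
 *	sc->rx_rmap = if_ringmap_alloc(dev, rx_ring_cnt, RX_RING_MAX);
 *	sc->tx_rmap = if_ringmap_alloc(dev, tx_ring_cnt, TX_RING_MAX);
 *	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);
 */
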
int
if_ringmap_count(const struct if_ringmap *rm)
{
	return (rm->rm_cnt);
}

int
if_ringmap_cpumap(const struct if_ringmap *rm, int ring)
{
	KASSERT(ring >= 0 && ring < rm->rm_cnt, ("invalid ring %d", ring));
	return (rm->rm_cpumap[ring]);
}

void
if_ringmap_rdrtable(const struct if_ringmap *rm, int table[], int table_nent)
{
	int i, grid_idx, grid_cnt, patch_off, patch_cnt, ncopy;

	KASSERT(table_nent > 0 && (table_nent & NETISR_CPUMASK) == 0,
	    ("invalid redirect table entries %d", table_nent));

	grid_idx = 0;
	for (i = 0; i < NETISR_CPUMAX; ++i) {
		table[i] = grid_idx++ % rm->rm_cnt;

		if (grid_idx == rm->rm_grid)
			grid_idx = 0;
	}

	/*
	 * Make the rings distributed more evenly for the remainder
	 * of each grid.
	 *
	 * e.g. 12 netisrs, rm contains 8 rings.
	 *
	 * Redirect table before:
	 *
	 *  0  1  2  3  4  5  6  7  0  1  2  3  0  1  2  3
	 *  4  5  6  7  0  1  2  3  0  1  2  3  4  5  6  7
	 *  0  1  2  3  0  1  2  3  4  5  6  7  0  1  2  3
	 *  ...
	 *
	 * Redirect table after being patched (pX, patched entries):
	 *
	 *  0  1  2  3  4  5  6  7 p0 p1 p2 p3  0  1  2  3
	 *  4  5  6  7 p4 p5 p6 p7  0  1  2  3  4  5  6  7
	 * p0 p1 p2 p3  0  1  2  3  4  5  6  7 p4 p5 p6 p7
	 *  ...
	 */
	patch_cnt = rm->rm_grid % rm->rm_cnt;
	if (patch_cnt == 0)
		goto done;
	patch_off = rm->rm_grid - (rm->rm_grid % rm->rm_cnt);

	grid_cnt = roundup(NETISR_CPUMAX, rm->rm_grid) / rm->rm_grid;
	grid_idx = 0;
	for (i = 0; i < grid_cnt; ++i) {
		int j;

		for (j = 0; j < patch_cnt; ++j) {
			int fix_idx;

			fix_idx = (i * rm->rm_grid) + patch_off + j;
			if (fix_idx >= NETISR_CPUMAX)
				goto done;
			table[fix_idx] = grid_idx++ % rm->rm_cnt;
		}
	}
done:
	/*
	 * If the device supports a larger redirect table, duplicate
	 * the first NETISR_CPUMAX entries to the rest of the table,
	 * so that it matches the upper layer's expectation:
	 * (hash & NETISR_CPUMASK) % netisr_ncpus
	 */
	ncopy = table_nent / NETISR_CPUMAX;
	for (i = 1; i < ncopy; ++i) {
		memcpy(&table[i * NETISR_CPUMAX], table,
		    NETISR_CPUMAX * sizeof(table[0]));
	}
	if (if_ringmap_dumprdr) {
		for (i = 0; i < table_nent; ++i) {
			if (i != 0 && i % 16 == 0)
				kprintf("\n");
			kprintf("%03d ", table[i]);
		}
		kprintf("\n");
	}
}

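/*
 * Usage sketch (hypothetical; per the KASSERT above, the table size
 * must be a positive multiple of NETISR_CPUMAX): build an RSS redirect
 * table, then program it into the NIC's redirection registers elsewhere:
 *
 *	int table[NETISR_CPUMAX];
 *
 *	if_ringmap_rdrtable(sc->rx_rmap, table, NETISR_CPUMAX);
 */
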
static int
if_ringmap_cpumap_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct if_ringmap *rm = arg1;
	int i, error = 0;

	for (i = 0; i < rm->rm_cnt; ++i) {
		int cpu = rm->rm_cpumap[i];

		error = SYSCTL_OUT(req, &cpu, sizeof(cpu));