/*
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
 */
#include "opt_inet6.h"
#include "opt_ifpoll.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/thread.h>
#include <sys/serialize.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/mutex2.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_ringmap.h>
#include <net/ifq_var.h>
#include <net/radix.h>
#include <net/route.h>
#include <net/if_clone.h>
#include <net/netisr2.h>
#include <net/netmsg2.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>
#include <machine/smp.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#endif
struct netmsg_ifaddr {
	struct netmsg_base base;
	struct ifaddr	*ifa;
	struct ifnet	*ifp;
	int		tail;
};
struct ifsubq_stage_head {
	TAILQ_HEAD(, ifsubq_stage)	stg_head;
};

#define RINGMAP_FLAG_NONE		0x0
#define RINGMAP_FLAG_POWEROF2		0x1
/*
 * System initialization
 */
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t, struct ucred *);
static void	ifinit(void *);
static void	ifnetinit(void *);
static void	if_slowtimo(void *);
static void	link_rtrequest(int, struct rtentry *);
static int	if_rtdel(struct radix_node *, void *);
static void	if_slowtimo_dispatch(netmsg_t);
/* Helper functions */
static void	ifsq_watchdog_reset(struct ifsubq_watchdog *);
static int	if_delmulti_serialized(struct ifnet *, struct sockaddr *);
static struct ifnet_array *ifnet_array_alloc(int);
static void	ifnet_array_free(struct ifnet_array *);
static struct ifnet_array *ifnet_array_add(struct ifnet *,
		    const struct ifnet_array *);
static struct ifnet_array *ifnet_array_del(struct ifnet *,
		    const struct ifnet_array *);
#ifdef INET6
/*
 * XXX: declared here to avoid including many inet6-related files;
 * should be more generalized?
 */
extern void	nd6_setmtu(struct ifnet *);
#endif
SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
SYSCTL_NODE(_net_link, OID_AUTO, ringmap, CTLFLAG_RW, 0, "link ringmap");
static int ifsq_stage_cntmax = 4;
TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax);
SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW,
    &ifsq_stage_cntmax, 0, "ifq staging packet count max");
static int if_stats_compat = 0;
SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW,
    &if_stats_compat, 0, "Maintain the old ifnet stats for compatibility");
static int if_ringmap_dumprdr = 0;
SYSCTL_INT(_net_link_ringmap, OID_AUTO, dump_rdr, CTLFLAG_RW,
    &if_ringmap_dumprdr, 0, "dump redirect table");
SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL);
SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifnetinit, NULL);
static if_com_alloc_t *if_com_alloc[256];
static if_com_free_t *if_com_free[256];
MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");
int			ifqmaxlen = IFQ_MAXLEN;
struct ifnethead	ifnetlist = TAILQ_HEAD_INITIALIZER(ifnetlist);

static struct ifnet_array	ifnet_array0;
static struct ifnet_array	*ifnet_array = &ifnet_array0;

static struct callout		if_slowtimo_timer;
static struct netmsg_base	if_slowtimo_netmsg;

int			if_index = 0;
struct ifnet		**ifindex2ifnet = NULL;
static struct mtx	ifnet_mtx = MTX_INITIALIZER("ifnet");

static struct ifsubq_stage_head	ifsubq_stage_heads[MAXCPU];
#define IFQ_KTR_STRING		"ifq=%p"
#define IFQ_KTR_ARGS		struct ifaltq *ifq
#ifndef KTR_IFQ
#define KTR_IFQ			KTR_ALL
#endif
KTR_INFO_MASTER(ifq);
KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS);
KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS);
#define logifq(name, arg)	KTR_LOG(ifq_ ## name, arg)
#define IF_START_KTR_STRING	"ifp=%p"
#define IF_START_KTR_ARGS	struct ifnet *ifp
#ifndef KTR_IF_START
#define KTR_IF_START		KTR_ALL
#endif
KTR_INFO_MASTER(if_start);
KTR_INFO(KTR_IF_START, if_start, run, 0,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, sched, 1,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, avoid, 2,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
#define logifstart(name, arg)	KTR_LOG(if_start_ ## name, arg)
TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head);
/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */
/* ARGSUSED */
static void
ifinit(void *dummy)
{
	struct ifnet *ifp;

	callout_init_mp(&if_slowtimo_timer);
	netmsg_init(&if_slowtimo_netmsg, NULL, &netisr_adone_rport,
	    MSGF_PRIORITY, if_slowtimo_dispatch);

	/* XXX is this necessary? */
	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		if (ifp->if_snd.altq_maxlen == 0) {
			if_printf(ifp, "XXX: driver didn't set altq_maxlen\n");
			ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
		}
	}
	ifnet_unlock();

	/* Start if_slowtimo */
	lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg.lmsg);
}
static void
ifsq_ifstart_ipifunc(void *arg)
{
	struct ifaltq_subque *ifsq = arg;
	struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid);

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg);
	crit_exit();
}
static __inline void
ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
{
	KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
	TAILQ_REMOVE(&head->stg_head, stage, stg_link);
	stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED);
}
static __inline void
ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
{
	KKASSERT((stage->stg_flags &
	    (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
	stage->stg_flags |= IFSQ_STAGE_FLAG_QUED;
	TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link);
}
/*
 * Schedule ifnet.if_start on the subqueue owner CPU
 */
static void
ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force)
{
	int cpu;

	if (!force && curthread->td_type == TD_TYPE_NETISR &&
	    ifsq_stage_cntmax > 0) {
		struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);

		if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
			ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage);
		stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED;
		return;
	}

	cpu = ifsq_get_cpuid(ifsq);
	if (cpu != mycpuid)
		lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq);
	else
		ifsq_ifstart_ipifunc(ifsq);
}
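
/*
 * Flow sketch for the scheduling above: a netisr thread that cannot run
 * ifnet.if_start directly either stages the request in its per-cpu
 * ifsubq_stage_heads[] entry (deferring the cross-cpu notification), or
 * notifies the subqueue owner CPU -- by IPI through
 * ifsq_ifstart_ipifunc() when the owner is remote, or by calling the
 * ipifunc directly when we already are the owner.
 */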
/*
 * Request ifnet.if_start on the subqueue owner CPU.
 *
 * This function will release the ifnet.if_start subqueue interlock,
 * if ifnet.if_start for the subqueue does not need to be scheduled.
 */
static __inline int
ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running)
{
	if (!running || ifsq_is_empty(ifsq)
#ifdef ALTQ
	    || ifsq->ifsq_altq->altq_tbr != NULL
#endif
	) {
		ALTQ_SQ_LOCK(ifsq);
		/*
		 * ifnet.if_start subqueue interlock is released, if:
		 * 1) Hardware can not take any packets, due to
		 *    o  interface is marked down
		 *    o  hardware queue is full (ifsq_is_oactive)
		 *    Under the second situation, hardware interrupt
		 *    or polling(4) will call/schedule ifnet.if_start
		 *    on the subqueue when the hardware queue is ready
		 * 2) There is no packet in the subqueue.
		 *    Further ifq_dispatch or ifq_handoff will call/
		 *    schedule ifnet.if_start on the subqueue.
		 * 3) TBR is used and it does not allow further
		 *    dequeueing.
		 *    TBR callout will call ifnet.if_start on the
		 *    subqueue.
		 */
		if (!running || !ifsq_data_ready(ifsq)) {
			ifsq_clr_started(ifsq);
			ALTQ_SQ_UNLOCK(ifsq);
			return 0;
		}
		ALTQ_SQ_UNLOCK(ifsq);
	}
	return 1;
}
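
/*
 * Usage sketch of the if_start interlock checked above (see
 * ifsq_devstart() below for the acquire side):
 *
 *	ALTQ_SQ_LOCK(ifsq);
 *	if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
 *		ALTQ_SQ_UNLOCK(ifsq);
 *		return;				// interlock busy or no work
 *	}
 *	ifsq_set_started(ifsq);			// acquire the interlock
 *	ALTQ_SQ_UNLOCK(ifsq);
 *	ifp->if_start(ifp, ifsq);
 *	// ifsq_ifstart_need_schedule() then either keeps the interlock
 *	// (more work; reschedule) or clears "started" and releases it.
 */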
static void
ifsq_ifstart_dispatch(netmsg_t msg)
{
	struct lwkt_msg *lmsg = &msg->base.lmsg;
	struct ifaltq_subque *ifsq = lmsg->u.ms_resultp;
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct globaldata *gd = mycpu;
	int running = 0, need_sched;

	crit_enter();
	lwkt_replymsg(lmsg, 0);	/* reply ASAP */
	crit_exit();

	if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) {
		/*
		 * We need to chase the subqueue owner CPU change.
		 */
		ifsq_ifstart_schedule(ifsq, 1);
		return;
	}

	ifsq_serialize_hw(ifsq);
	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
		ifp->if_start(ifp, ifsq);
		if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
			running = 1;
	}
	need_sched = ifsq_ifstart_need_schedule(ifsq, running);
	ifsq_deserialize_hw(ifsq);

	if (need_sched) {
		/*
		 * More data needs to be transmitted, ifnet.if_start is
		 * scheduled on the subqueue owner CPU, and we keep going.
		 * NOTE: ifnet.if_start subqueue interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}
}
/* Device driver ifnet.if_start helper function */
void
ifsq_devstart(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	int running = 0;

	ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);

	ALTQ_SQ_LOCK(ifsq);
	if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
		ALTQ_SQ_UNLOCK(ifsq);
		return;
	}
	ifsq_set_started(ifsq);
	ALTQ_SQ_UNLOCK(ifsq);

	ifp->if_start(ifp, ifsq);

	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
		running = 1;

	if (ifsq_ifstart_need_schedule(ifsq, running)) {
		/*
		 * More data needs to be transmitted, ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}
}
void
if_devstart(struct ifnet *ifp)
{
	ifsq_devstart(ifq_get_subq_default(&ifp->if_snd));
}
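
/*
 * Typical driver usage (sketch; "foo" is a hypothetical driver): the
 * transmit-complete path reclaims descriptors, clears the oactive state
 * and kicks the send queue again.  The hardware serializer must be
 * held, which ifsq_devstart() asserts.
 *
 *	static void
 *	foo_txeof(struct foo_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->arpcom.ac_if;
 *
 *		// ... reclaim completed TX descriptors ...
 *		ifsq_clr_oactive(ifq_get_subq_default(&ifp->if_snd));
 *		if_devstart(ifp);
 *	}
 */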
/* Device driver ifnet.if_start schedule helper function */
void
ifsq_devstart_sched(struct ifaltq_subque *ifsq)
{
	ifsq_ifstart_schedule(ifsq, 1);
}
void
if_devstart_sched(struct ifnet *ifp)
{
	ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd));
}
static void
if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_enter(ifp->if_serializer);
}

static void
if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_exit(ifp->if_serializer);
}
static int
if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	return lwkt_serialize_try(ifp->if_serializer);
}
#ifdef INVARIANTS
static void
if_default_serialize_assert(struct ifnet *ifp,
			    enum ifnet_serialize slz __unused,
			    boolean_t serialized)
{
	if (serialized)
		ASSERT_SERIALIZED(ifp->if_serializer);
	else
		ASSERT_NOT_SERIALIZED(ifp->if_serializer);
}
#endif
/*
 * Attach an interface to the list of "active" interfaces.
 *
 * The serializer is optional.
 */
void
if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
{
	unsigned socksize;
	int namelen, masklen;
	struct sockaddr_dl *sdl, *sdl_addr;
	struct ifaddr *ifa;
	struct ifaltq *ifq;
	struct ifnet **old_ifindex2ifnet = NULL;
	struct ifnet_array *old_ifnet_array;
	int i, q;
	static int if_indexlim = 8;
	if (ifp->if_serialize != NULL) {
		KASSERT(ifp->if_deserialize != NULL &&
			ifp->if_tryserialize != NULL &&
			ifp->if_serialize_assert != NULL,
			("serialize functions are partially setup"));

		/*
		 * If the device supplies serialize functions,
		 * then clear if_serializer to catch any invalid
		 * usage of this field.
		 */
		KASSERT(serializer == NULL,
			("both serialize functions and default serializer "
			 "are supplied"));
		ifp->if_serializer = NULL;
	} else {
		KASSERT(ifp->if_deserialize == NULL &&
			ifp->if_tryserialize == NULL &&
			ifp->if_serialize_assert == NULL,
			("serialize functions are partially setup"));
		ifp->if_serialize = if_default_serialize;
		ifp->if_deserialize = if_default_deserialize;
		ifp->if_tryserialize = if_default_tryserialize;
#ifdef INVARIANTS
		ifp->if_serialize_assert = if_default_serialize_assert;
#endif

		/*
		 * The serializer can be passed in from the device,
		 * allowing the same serializer to be used for both
		 * the interrupt interlock and the device queue.
		 * If not specified, the netif structure will use an
		 * embedded serializer.
		 */
		if (serializer == NULL) {
			serializer = &ifp->if_default_serializer;
			lwkt_serialize_init(serializer);
		}
		ifp->if_serializer = serializer;
	}
	/*
	 * The old code would work if the interface passed a pre-existing
	 * chain of ifaddrs to this code.  We don't trust our callers to
	 * properly initialize the tailq, however, so we no longer allow
	 * this unlikely case.
	 */
	ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
				    M_IFADDR, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpus; ++i)
		TAILQ_INIT(&ifp->if_addrheads[i]);
	TAILQ_INIT(&ifp->if_multiaddrs);
	TAILQ_INIT(&ifp->if_groups);
	getmicrotime(&ifp->if_lastchange);
	/*
	 * create a Link Level name for this device
	 */
	namelen = strlen(ifp->if_xname);
	masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
	socksize = masklen + ifp->if_addrlen;
	if (socksize < sizeof(*sdl))
		socksize = sizeof(*sdl);
	socksize = RT_ROUNDUP(socksize);
	ifa = ifa_create(sizeof(struct ifaddr) + 2 * socksize);
	sdl = sdl_addr = (struct sockaddr_dl *)(ifa + 1);
	sdl->sdl_len = socksize;
	sdl->sdl_family = AF_LINK;
	bcopy(ifp->if_xname, sdl->sdl_data, namelen);
	sdl->sdl_nlen = namelen;
	sdl->sdl_type = ifp->if_type;
	ifp->if_lladdr = ifa;
	ifa->ifa_ifp = ifp;
	ifa->ifa_rtrequest = link_rtrequest;
	ifa->ifa_addr = (struct sockaddr *)sdl;
	sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
	ifa->ifa_netmask = (struct sockaddr *)sdl;
	sdl->sdl_len = masklen;
	while (namelen != 0)
		sdl->sdl_data[--namelen] = 0xff;
	ifa_iflink(ifa, ifp, 0 /* Insert head */);
	ifp->if_data_pcpu = kmalloc_cachealign(
	    ncpus * sizeof(struct ifdata_pcpu), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifp->if_mapsubq == NULL)
		ifp->if_mapsubq = ifq_mapsubq_default;
	ifq = &ifp->if_snd;
	ifq->altq_type = 0;
	ifq->altq_disc = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	ifq->altq_tbr = NULL;
	ifq->altq_ifp = ifp;

	if (ifq->altq_subq_cnt <= 0)
		ifq->altq_subq_cnt = 1;
	ifq->altq_subq = kmalloc_cachealign(
	    ifq->altq_subq_cnt * sizeof(struct ifaltq_subque),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifq->altq_maxlen == 0) {
		if_printf(ifp, "driver didn't set altq_maxlen\n");
		ifq_set_maxlen(ifq, ifqmaxlen);
	}
	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];

		ALTQ_SQ_LOCK_INIT(ifsq);
		ifsq->ifsq_index = q;

		ifsq->ifsq_altq = ifq;
		ifsq->ifsq_ifp = ifp;

		ifsq->ifsq_maxlen = ifq->altq_maxlen;
		ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES;
		ifsq->ifsq_prepended = NULL;
		ifsq->ifsq_started = 0;
		ifsq->ifsq_hw_oactive = 0;
		ifsq_set_cpuid(ifsq, 0);
		if (ifp->if_serializer != NULL)
			ifsq_set_hw_serialize(ifsq, ifp->if_serializer);
		ifsq->ifsq_stage =
		    kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage),
		    M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < ncpus; ++i)
			ifsq->ifsq_stage[i].stg_subq = ifsq;

		ifsq->ifsq_ifstart_nmsg =
		    kmalloc(ncpus * sizeof(struct netmsg_base),
		    M_LWKTMSG, M_WAITOK);
		for (i = 0; i < ncpus; ++i) {
			netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL,
			    &netisr_adone_rport, 0, ifsq_ifstart_dispatch);
			ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq;
		}
	}
	ifq_set_classic(ifq);
	/*
	 * Increase mbuf cluster/jcluster limits for the mbufs that
	 * could sit on the device queues for quite some time.
	 */
	if (ifp->if_nmbclusters > 0)
		mcl_inclimit(ifp->if_nmbclusters);
	if (ifp->if_nmbjclusters > 0)
		mjcl_inclimit(ifp->if_nmbjclusters);
	/*
	 * Install this ifp into ifindex2ifnet, ifnet queue and ifnet
	 * array after it is setup.
	 *
	 * Protect ifindex2ifnet, ifnet queue and ifnet array changes
	 * by ifnet lock, so that non-netisr threads could get a
	 * consistent view.
	 */
	ifnet_lock();

	/* Don't update if_index until ifindex2ifnet is setup */
	ifp->if_index = if_index + 1;
	sdl_addr->sdl_index = ifp->if_index;
	/*
	 * Install this ifp into ifindex2ifnet
	 */
	if (ifindex2ifnet == NULL || ifp->if_index >= if_indexlim) {
		unsigned int n;
		struct ifnet **q;

		/*
		 * Grow ifindex2ifnet
		 */
		if_indexlim <<= 1;
		n = if_indexlim * sizeof(*q);
		q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
		if (ifindex2ifnet != NULL) {
			bcopy(ifindex2ifnet, q, n/2);
			/* Free old ifindex2ifnet after sync all netisrs */
			old_ifindex2ifnet = ifindex2ifnet;
		}
		ifindex2ifnet = q;
	}
	ifindex2ifnet[ifp->if_index] = ifp;
	/*
	 * Update if_index after this ifp is installed into ifindex2ifnet,
	 * so that netisrs could get a consistent view of ifindex2ifnet.
	 */
	if_index = ifp->if_index;
	/*
	 * Install this ifp into ifnet array.
	 */
	/* Free old ifnet array after sync all netisrs */
	old_ifnet_array = ifnet_array;
	ifnet_array = ifnet_array_add(ifp, old_ifnet_array);

	/*
	 * Install this ifp into ifnet queue.
	 */
	TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_link);

	ifnet_unlock();
	/*
	 * Sync all netisrs so that the old ifindex2ifnet and ifnet array
	 * are no longer accessed and we can free them safely later on.
	 */
	netmsg_service_sync();
	if (old_ifindex2ifnet != NULL)
		kfree(old_ifindex2ifnet, M_IFADDR);
	ifnet_array_free(old_ifnet_array);
	if (!SLIST_EMPTY(&domains))
		if_attachdomain1(ifp);

	/* Announce the interface. */
	EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
	devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
}
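
/*
 * Attachment sketch for a hypothetical "foo" Ethernet driver; such
 * drivers normally reach if_attach() via ether_ifattach(), passing
 * either their own serializer or NULL for the embedded default:
 *
 *	ifp->if_softc = sc;
 *	if_initname(ifp, "foo", unit);
 *	ifp->if_ioctl = foo_ioctl;
 *	ifp->if_start = foo_start;
 *	ifq_set_maxlen(&ifp->if_snd, FOO_TX_NDESC - 1);
 *	ether_ifattach(ifp, sc->foo_enaddr, NULL);
 */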
static void
if_attachdomain(void *dummy)
{
	struct ifnet *ifp;

	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_link)
		if_attachdomain1(ifp);
	ifnet_unlock();
}
SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
	if_attachdomain, NULL);
static void
if_attachdomain1(struct ifnet *ifp)
{
	struct domain *dp;

	crit_enter();

	/* address family dependent data region */
	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
	SLIST_FOREACH(dp, &domains, dom_next) {
		if (dp->dom_ifattach)
			ifp->if_afdata[dp->dom_family] =
				(*dp->dom_ifattach)(ifp);
	}
	crit_exit();
}
/*
 * Purge all addresses whose type is _not_ AF_LINK
 */
static void
if_purgeaddrs_nolink_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	struct ifnet *ifp = lmsg->u.ms_resultp;
	struct ifaddr_container *ifac, *next;

	ASSERT_IN_NETISR(0);
	/*
	 * The ifaddr processing in the following loop will block,
	 * however, this function is called in netisr0, in which
	 * ifaddr list changes happen, so we don't care about the
	 * blocking of the ifaddr processing here.
	 */
	TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
			      ifa_link, next) {
		struct ifaddr *ifa = ifac->ifa;

		/* Ignore marker */
		if (ifa->ifa_addr->sa_family == AF_UNSPEC)
			continue;

		/* Leave link ifaddr as it is */
		if (ifa->ifa_addr->sa_family == AF_LINK)
			continue;
#ifdef INET
		/* XXX: Ugly!! ad hoc just for INET */
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;
			struct sockaddr_in saved_addr, saved_dst;
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in4 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i) {
				kprintf("%d ",
				    ifa->ifa_containers[i].ifa_refcnt);
			}
			kprintf("\n");
#endif

			/* Save information for panic. */
			memcpy(&saved_addr, ifa->ifa_addr, sizeof(saved_addr));
			if (ifa->ifa_dstaddr != NULL) {
				memcpy(&saved_dst, ifa->ifa_dstaddr,
				    sizeof(saved_dst));
			} else {
				memset(&saved_dst, 0, sizeof(saved_dst));
			}

			bzero(&ifr, sizeof ifr);
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(SIOCDIFADDR, (caddr_t)&ifr, ifp,
				       NULL) == 0)
				continue;

			/* MUST NOT HAPPEN */
			panic("%s: in_control failed %x, dst %x",
			    ifp->if_xname,
			    ntohl(saved_addr.sin_addr.s_addr),
			    ntohl(saved_dst.sin_addr.s_addr));
		}
#endif /* INET */
->ifa_addr
&& ifa
->ifa_addr
->sa_family
== AF_INET6
) {
810 #ifdef IFADDR_DEBUG_VERBOSE
813 kprintf("purge in6 addr %p: ", ifa
);
814 for (i
= 0; i
< ncpus
; ++i
)
815 kprintf("%d ", ifa
->ifa_containers
[i
].ifa_refcnt
);
820 /* ifp_addrhead is already updated */
824 ifa_ifunlink(ifa
, ifp
);
828 lwkt_replymsg(lmsg
, 0);
void
if_purgeaddrs_nolink(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg = &nmsg.lmsg;

	ASSERT_CANDOMSG_NETISR0(curthread);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    if_purgeaddrs_nolink_dispatch);
	lmsg->u.ms_resultp = ifp;
	lwkt_domsg(netisr_cpuport(0), lmsg, 0);
}
static void
ifq_stage_detach_handler(netmsg_t nmsg)
{
	struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp;
	int q;

	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
		struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);

		if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED)
			ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage);
	}
	lwkt_replymsg(&nmsg->lmsg, 0);
}
static void
ifq_stage_detach(struct ifaltq *ifq)
{
	struct netmsg_base base;
	int cpu;

	netmsg_init(&base, NULL, &curthread->td_msgport, 0,
	    ifq_stage_detach_handler);
	base.lmsg.u.ms_resultp = ifq;

	for (cpu = 0; cpu < ncpus; ++cpu)
		lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0);
}
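
/*
 * Note: the detach message is sent to each netisr CPU in turn with the
 * synchronous lwkt_domsg(), so once ifq_stage_detach() returns no
 * per-cpu staging queue can still reference this ifq.
 */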
struct netmsg_if_rtdel {
	struct netmsg_base base;
	struct ifnet *ifp;
};
static void
if_rtdel_dispatch(netmsg_t msg)
{
	struct netmsg_if_rtdel *rmsg = (void *)msg;
	int i, cpu, nextcpu;

	cpu = mycpuid;
	for (i = 1; i <= AF_MAX; i++) {
		struct radix_node_head *rnh;

		if ((rnh = rt_tables[cpu][i]) == NULL)
			continue;
		rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp);
	}

	nextcpu = cpu + 1;
	if (nextcpu < ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
	else
		lwkt_replymsg(&rmsg->base.lmsg, 0);
}
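
/*
 * Note: instead of broadcasting, the rtdel message is forwarded from
 * CPU to CPU with lwkt_forwardmsg(), walking each per-cpu replicated
 * routing table (rt_tables[cpu]) exactly once; the last CPU replies to
 * the original sender.  rt_domsg_global() in if_detach() starts this
 * chain on cpu0.
 */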
/*
 * Detach an interface, removing it from the
 * list of "active" interfaces.
 */
void
if_detach(struct ifnet *ifp)
{
	struct ifnet_array *old_ifnet_array;
	struct netmsg_if_rtdel msg;
	struct domain *dp;
	int q;

	/* Announce that the interface is gone. */
	EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
	/*
	 * Remove this ifp from ifindex2ifnet, ifnet queue and ifnet
	 * array before it is whacked.
	 *
	 * Protect ifindex2ifnet, ifnet queue and ifnet array changes
	 * by ifnet lock, so that non-netisr threads could get a
	 * consistent view.
	 */
	ifnet_lock();

	/*
	 * Remove this ifp from ifindex2ifnet and maybe decrement if_index.
	 */
	ifindex2ifnet[ifp->if_index] = NULL;
	while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
		if_index--;
	/*
	 * Remove this ifp from ifnet queue.
	 */
	TAILQ_REMOVE(&ifnetlist, ifp, if_link);
	/*
	 * Remove this ifp from ifnet array.
	 */
	/* Free old ifnet array after sync all netisrs */
	old_ifnet_array = ifnet_array;
	ifnet_array = ifnet_array_del(ifp, old_ifnet_array);

	ifnet_unlock();

	/*
	 * Sync all netisrs so that the old ifnet array is no longer
	 * accessed and we can free it safely later on.
	 */
	netmsg_service_sync();
	ifnet_array_free(old_ifnet_array);
	/*
	 * Remove routes and flush queues.
	 */
	crit_enter();
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		ifpoll_deregister(ifp);
#endif
	if_down(ifp);
	/* Decrease the mbuf clusters/jclusters limits increased by us */
	if (ifp->if_nmbclusters > 0)
		mcl_inclimit(-ifp->if_nmbclusters);
	if (ifp->if_nmbjclusters > 0)
		mjcl_inclimit(-ifp->if_nmbjclusters);
#ifdef ALTQ
	if (ifq_is_enabled(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ifq_is_attached(&ifp->if_snd))
		altq_detach(&ifp->if_snd);
#endif
	/*
	 * Clean up all addresses.
	 */
	ifp->if_lladdr = NULL;

	if_purgeaddrs_nolink(ifp);
	if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
		struct ifaddr *ifa;

		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
			("non-link ifaddr is left on if_addrheads"));

		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
		KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
			("there are still ifaddrs left on if_addrheads"));
	}
#ifdef INET
	/*
	 * Remove all IPv4 kernel structures related to ifp.
	 */
	in_ifdetach(ifp);
#endif

#ifdef INET6
	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect the inconsistency and complain.
	 */
	in6_ifdetach(ifp);
#endif
	/*
	 * Delete all remaining routes using this interface.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    if_rtdel_dispatch);
	msg.ifp = ifp;
	rt_domsg_global(&msg.base);
	SLIST_FOREACH(dp, &domains, dom_next) {
		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
			(*dp->dom_ifdetach)(ifp,
				ifp->if_afdata[dp->dom_family]);
	}
	kfree(ifp->if_addrheads, M_IFADDR);

	lwkt_synchronize_ipiqs("if_detach");
	ifq_stage_detach(&ifp->if_snd);
	for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q];

		kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG);
		kfree(ifsq->ifsq_stage, M_DEVBUF);
	}
	kfree(ifp->if_snd.altq_subq, M_DEVBUF);
	kfree(ifp->if_data_pcpu, M_DEVBUF);

	crit_exit();
}
/*
 * Create interface group without members
 */
struct ifg_group *
if_creategroup(const char *groupname)
{
	struct ifg_group *ifg = NULL;

	if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group),
	    M_TEMP, M_NOWAIT)) == NULL)
		return (NULL);

	strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
	ifg->ifg_refcnt = 0;
	ifg->ifg_carp_demoted = 0;
	TAILQ_INIT(&ifg->ifg_members);
#if NPF > 0
	pfi_attach_ifgroup(ifg);
#endif
	TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next);

	return (ifg);
}
/*
 * Add a group to an interface
 */
int
if_addgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_group *ifg = NULL;
	struct ifg_member *ifgm;

	if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
	    groupname[strlen(groupname) - 1] <= '9')
		return (EINVAL);

	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			return (EEXIST);

	if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL)
		return (ENOMEM);

	if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) {
		kfree(ifgl, M_TEMP);
		return (ENOMEM);
	}

	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, groupname))
			break;

	if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) {
		kfree(ifgl, M_TEMP);
		kfree(ifgm, M_TEMP);
		return (ENOMEM);
	}

	ifg->ifg_refcnt++;
	ifgl->ifgl_group = ifg;
	ifgm->ifgm_ifp = ifp;

	TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
	TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}
/*
 * Remove a group from an interface
 */
int
if_delgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_member *ifgm;

	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			break;
	if (ifgl == NULL)
		return (ENOENT);

	TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);

	TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
		if (ifgm->ifgm_ifp == ifp)
			break;

	if (ifgm != NULL) {
		TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
		kfree(ifgm, M_TEMP);
	}

	if (--ifgl->ifgl_group->ifg_refcnt == 0) {
		TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
#if NPF > 0
		pfi_detach_ifgroup(ifgl->ifgl_group);
#endif
		kfree(ifgl->ifgl_group, M_TEMP);
	}

	kfree(ifgl, M_TEMP);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}
/*
 * Stores all groups from an interface in memory pointed
 * to by data
 */
int
if_getgroup(caddr_t data, struct ifnet *ifp)
{
	int len, error;
	struct ifg_list *ifgl;
	struct ifg_req ifgrq, *ifgp;
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
			ifgr->ifgr_len += sizeof(struct ifg_req);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
		    sizeof(ifgrq.ifgrq_group));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}
/*
 * Stores all members of a group in memory pointed to by data
 */
int
if_getgroupmembers(caddr_t data)
{
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
	struct ifg_group *ifg;
	struct ifg_member *ifgm;
	struct ifg_req ifgrq, *ifgp;
	int len, error;

	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
			break;
	if (ifg == NULL)
		return (ENOENT);

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
			ifgr->ifgr_len += sizeof(ifgrq);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
		    sizeof(ifgrq.ifgrq_member));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}
/*
 * Delete Routes for a Network Interface
 *
 * Called for each routing entry via the rnh->rnh_walktree() call above
 * to delete all route entries referencing a detaching network interface.
 *
 * Arguments:
 *	rn	pointer to node in the routing table
 *	arg	argument passed to rnh->rnh_walktree() - detaching interface
 *
 * Returns:
 *	0	successful
 *	errno	failed - reason indicated
 */
static int
if_rtdel(struct radix_node *rn, void *arg)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct ifnet *ifp = arg;
	int err;

	if (rt->rt_ifp == ifp) {
		/*
		 * Protect (sorta) against walktree recursion problems
		 * with cloned routes
		 */
		if (!(rt->rt_flags & RTF_UP))
			return (0);

		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags, NULL);
		if (err)
			log(LOG_WARNING, "if_rtdel: error %d\n", err);
	}
	return (0);
}
static __inline boolean_t
ifa_prefer(const struct ifaddr *cur_ifa, const struct ifaddr *old_ifa)
{
	if (old_ifa == NULL)
		return TRUE;

	if ((old_ifa->ifa_ifp->if_flags & IFF_UP) == 0 &&
	    (cur_ifa->ifa_ifp->if_flags & IFF_UP))
		return TRUE;
	if ((old_ifa->ifa_flags & IFA_ROUTE) == 0 &&
	    (cur_ifa->ifa_flags & IFA_ROUTE))
		return TRUE;
	return FALSE;
}
/*
 * Locate an interface based on a complete address.
 */
struct ifaddr *
ifa_ifwithaddr(struct sockaddr *addr)
{
	const struct ifnet_array *arr;
	int i;

	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (sa_equal(addr, ifa->ifa_addr))
				return (ifa);
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    /* IPv6 doesn't have broadcast */
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr))
				return (ifa);
		}
	}
	return (NULL);
}
/*
 * Locate the point to point interface with a given destination address.
 */
struct ifaddr *
ifa_ifwithdstaddr(struct sockaddr *addr)
{
	const struct ifnet_array *arr;
	int i;

	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];
		struct ifaddr_container *ifac;

		if (!(ifp->if_flags & IFF_POINTOPOINT))
			continue;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (ifa->ifa_dstaddr &&
			    sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		}
	}
	return (NULL);
}
/*
 * Find an interface on a specific network.  If many, choose
 * the most specific one found.
 */
struct ifaddr *
ifa_ifwithnet(struct sockaddr *addr)
{
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;
	char *addr_data = addr->sa_data, *cplim;
	const struct ifnet_array *arr;
	int i;

	/*
	 * AF_LINK addresses can be looked up directly by their index number,
	 * so do that if we can.
	 */
	if (af == AF_LINK) {
		struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;

		if (sdl->sdl_index && sdl->sdl_index <= if_index)
			return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
	}

	/*
	 * Scan through each interface, looking for ones that have
	 * addresses in this address family.
	 */
	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;
			char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
				goto next;	/* next address! */
			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr))
					return (ifa);
			} else {
				/*
				 * if we have a special address handler,
				 * then use it instead of the generic one.
				 */
				if (ifa->ifa_claim_addr) {
					if ((*ifa->ifa_claim_addr)(ifa, addr))
						return (ifa);
					else
						continue;
				}

				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 * (A byte at a time)
				 */
				if (ifa->ifa_netmask == 0)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len +
					(char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one) then remember the new one
				 * before continuing to search for an even
				 * better one.  If the netmasks are equal,
				 * we prefer this ifa based on the result
				 * of ifa_prefer().
				 */
				if (ifa_maybe == NULL ||
				    rn_refines((char *)ifa->ifa_netmask,
					    (char *)ifa_maybe->ifa_netmask) ||
				    (sa_equal(ifa_maybe->ifa_netmask,
					    ifa->ifa_netmask) &&
				     ifa_prefer(ifa, ifa_maybe)))
					ifa_maybe = ifa;
			}
next:			;
		}
	}
	return (ifa_maybe);
}
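
/*
 * Worked example of the byte-wise test above: matching addr 192.168.1.7
 * against an ifa with address 192.168.1.1 and netmask 255.255.255.0.
 * For every netmask byte, (*cp ^ *cp2) & *cp3 must be zero:
 *
 *	(192^192)&255 = 0, (168^168)&255 = 0, (1^1)&255 = 0, (7^1)&0 = 0
 *
 * so the network matches; 192.168.2.7 would fail on the third byte,
 * since (1^2)&255 != 0.
 */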
/*
 * Find an interface address specific to an interface best matching
 * a given address.
 */
struct ifaddr *
ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
{
	struct ifaddr_container *ifac;
	char *cp, *cp2, *cp3;
	char *cplim;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;

	if (af >= AF_MAX)
		return (NULL);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr->sa_family != af)
			continue;
		if (ifa_maybe == NULL)
			ifa_maybe = ifa;
		if (ifa->ifa_netmask == NULL) {
			if (sa_equal(addr, ifa->ifa_addr) ||
			    (ifa->ifa_dstaddr != NULL &&
			     sa_equal(addr, ifa->ifa_dstaddr)))
				return (ifa);
			continue;
		}
		if (ifp->if_flags & IFF_POINTOPOINT) {
			if (sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		} else {
			cp = addr->sa_data;
			cp2 = ifa->ifa_addr->sa_data;
			cp3 = ifa->ifa_netmask->sa_data;
			cplim = ifa->ifa_netmask->sa_len +
				(char *)ifa->ifa_netmask;
			for (; cp3 < cplim; cp3++) {
				if ((*cp++ ^ *cp2++) & *cp3)
					break;
			}
			if (cp3 == cplim)
				return (ifa);
		}
	}
	return (ifa_maybe);
}
/*
 * Default action when installing a route with a Link Level gateway.
 * Lookup an appropriate real ifa to point to.
 * This should be moved to /sys/net/link.c eventually.
 */
static void
link_rtrequest(int cmd, struct rtentry *rt)
{
	struct ifaddr *ifa;
	struct sockaddr *dst;
	struct ifnet *ifp;

	if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
	    (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
		return;
	ifa = ifaof_ifpforaddr(dst, ifp);
	if (ifa != NULL) {
		IFAFREE(rt->rt_ifa);
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
			ifa->ifa_rtrequest(cmd, rt);
	}
}
struct netmsg_ifroute {
	struct netmsg_base base;
	struct ifnet *ifp;
	int flag;
	int fam;
};
/*
 * Mark an interface down and notify protocols of the transition.
 */
static void
if_unroute_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg;
	struct ifnet *ifp = msg->ifp;
	int flag = msg->flag, fam = msg->fam;
	struct ifaddr_container *ifac;

	ifp->if_flags &= ~flag;
	getmicrotime(&ifp->if_lastchange);
	/*
	 * The ifaddr processing in the following loop will block,
	 * however, this function is called in netisr0, in which
	 * ifaddr list changes happen, so we don't care about the
	 * blocking of the ifaddr processing here.
	 */
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		/* Ignore marker */
		if (ifa->ifa_addr->sa_family == AF_UNSPEC)
			continue;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
	}
	ifq_purge_all(&ifp->if_snd);
	rt_ifmsg(ifp);

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static void
if_unroute(struct ifnet *ifp, int flag, int fam)
{
	struct netmsg_ifroute msg;

	ASSERT_CANDOMSG_NETISR0(curthread);

	netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0,
	    if_unroute_dispatch);
	msg.ifp = ifp;
	msg.flag = flag;
	msg.fam = fam;
	lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0);
}
/*
 * Mark an interface up and notify protocols of the transition.
 */
static void
if_route_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg;
	struct ifnet *ifp = msg->ifp;
	int flag = msg->flag, fam = msg->fam;
	struct ifaddr_container *ifac;

	ifq_purge_all(&ifp->if_snd);
	ifp->if_flags |= flag;
	getmicrotime(&ifp->if_lastchange);
	/*
	 * The ifaddr processing in the following loop will block,
	 * however, this function is called in netisr0, in which
	 * ifaddr list changes happen, so we don't care about the
	 * blocking of the ifaddr processing here.
	 */
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		/* Ignore marker */
		if (ifa->ifa_addr->sa_family == AF_UNSPEC)
			continue;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFUP, ifa->ifa_addr);
	}
	rt_ifmsg(ifp);
#ifdef INET6
	in6_if_up(ifp);
#endif

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static void
if_route(struct ifnet *ifp, int flag, int fam)
{
	struct netmsg_ifroute msg;

	ASSERT_CANDOMSG_NETISR0(curthread);

	netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0,
	    if_route_dispatch);
	msg.ifp = ifp;
	msg.flag = flag;
	msg.fam = fam;
	lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0);
}
/*
 * Mark an interface down and notify protocols of the transition.  An
 * interface going down is also considered to be a synchronizing event.
 * We must ensure that all packet processing related to the interface
 * has completed before we return so e.g. the caller can free the ifnet
 * structure that the mbufs may be referencing.
 *
 * NOTE: must be called at splnet or equivalent.
 */
void
if_down(struct ifnet *ifp)
{
	if_unroute(ifp, IFF_UP, AF_UNSPEC);
	netmsg_service_sync();
}
/*
 * Mark an interface up and notify protocols of the transition.
 *
 * NOTE: must be called at splnet or equivalent.
 */
void
if_up(struct ifnet *ifp)
{
	if_route(ifp, IFF_UP, AF_UNSPEC);
}
/*
 * Process a link state change.
 * NOTE: must be called at splsoftnet or equivalent.
 */
void
if_link_state_change(struct ifnet *ifp)
{
	int link_state = ifp->if_link_state;

	rt_ifmsg(ifp);
	devctl_notify("IFNET", ifp->if_xname,
	    (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
}
/*
 * Handle interface watchdog timer routines.  Called
 * from softclock, we decrement timers (if set) and
 * call the appropriate interface routine on expiration.
 */
static void
if_slowtimo_dispatch(netmsg_t nmsg)
{
	struct globaldata *gd = mycpu;
	const struct ifnet_array *arr;
	int i;

	ASSERT_IN_NETISR(0);

	crit_enter_gd(gd);
	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */
	crit_exit_gd(gd);

	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];

		crit_enter();

		if (if_stats_compat) {
			IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets);
			IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors);
			IFNET_STAT_GET(ifp, opackets, ifp->if_opackets);
			IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors);
			IFNET_STAT_GET(ifp, collisions, ifp->if_collisions);
			IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes);
			IFNET_STAT_GET(ifp, obytes, ifp->if_obytes);
			IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts);
			IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts);
			IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops);
			IFNET_STAT_GET(ifp, noproto, ifp->if_noproto);
			IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops);
		}

		if (ifp->if_timer == 0 || --ifp->if_timer) {
			crit_exit();
			continue;
		}

		if (ifp->if_watchdog) {
			if (ifnet_tryserialize_all(ifp)) {
				(*ifp->if_watchdog)(ifp);
				ifnet_deserialize_all(ifp);
			} else {
				/* try again next timeout */
				++ifp->if_timer;
			}
		}

		crit_exit();
	}

	callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
}
static void
if_slowtimo(void *arg __unused)
{
	struct lwkt_msg *lmsg = &if_slowtimo_netmsg.lmsg;

	KASSERT(mycpuid == 0, ("not on cpu0"));
	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg);
	crit_exit();
}
/*
 * Map interface name to
 * interface structure pointer.
 */
struct ifnet *
ifunit(const char *name)
{
	struct ifnet *ifp;

	/*
	 * Search all the interfaces for this name/number
	 */
	KASSERT(mtx_owned(&ifnet_mtx), ("ifnet is not locked"));

	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
			break;
	}
	return (ifp);
}
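
/*
 * Caller-side sketch: ifunit() requires the ifnet lock, which is
 * asserted above, e.g.
 *
 *	ifnet_lock();
 *	ifp = ifunit("em0");
 *	if (ifp != NULL) {
 *		// ... safe to use ifp; it cannot be detached here ...
 *	}
 *	ifnet_unlock();
 *
 * Netisr threads should use ifunit_netisr() below, which relies on the
 * per-netisr view of the ifnet array instead of the lock.
 */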
struct ifnet *
ifunit_netisr(const char *name)
{
	const struct ifnet_array *arr;
	int i;

	/*
	 * Search all the interfaces for this name/number
	 */
	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];

		if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
			return (ifp);
	}
	return (NULL);
}
/*
 * Interface ioctls.
 */
int
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifnet *ifp;
	struct ifreq *ifr;
	struct ifstat *ifs;
	int error, do_ifup = 0;
	short oif_flags;
	int new_flags;
	size_t namelen, onamelen;
	char new_name[IFNAMSIZ];
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	switch (cmd) {
	case SIOCGIFCONF:
		return (ifconf(cmd, data, cred));
	default:
		break;
	}

	ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCIFCREATE:
	case SIOCIFCREATE2:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
			cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
	case SIOCIFDESTROY:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return (if_clone_destroy(ifr->ifr_name));
	case SIOCIFGCLONERS:
		return (if_clone_list((struct if_clonereq *)data));
	default:
		break;
	}

	/*
	 * Nominal ioctl through interface, lookup the ifp and obtain a
	 * lock to serialize the ifconfig ioctl operation.
	 */
	ifnet_lock();

	ifp = ifunit(ifr->ifr_name);
	if (ifp == NULL) {
		ifnet_unlock();
		return (ENXIO);
	}
	error = 0;

	switch (cmd) {
	case SIOCGIFINDEX:
		ifr->ifr_index = ifp->if_index;
		break;

	case SIOCGIFFLAGS:
		ifr->ifr_flags = ifp->if_flags;
		ifr->ifr_flagshigh = ifp->if_flags >> 16;
		break;

	case SIOCGIFCAP:
		ifr->ifr_reqcap = ifp->if_capabilities;
		ifr->ifr_curcap = ifp->if_capenable;
		break;

	case SIOCGIFMETRIC:
		ifr->ifr_metric = ifp->if_metric;
		break;

	case SIOCGIFMTU:
		ifr->ifr_mtu = ifp->if_mtu;
		break;

	case SIOCGIFTSOLEN:
		ifr->ifr_tsolen = ifp->if_tsolen;
		break;
	case SIOCGIFDATA:
		error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data,
				sizeof(ifp->if_data));
		break;

	case SIOCGIFPHYS:
		ifr->ifr_phys = ifp->if_physical;
		break;

	case SIOCGIFPOLLCPU:
		ifr->ifr_pollcpu = -1;
		break;

	case SIOCSIFPOLLCPU:
		break;

	case SIOCSIFFLAGS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		new_flags = (ifr->ifr_flags & 0xffff) |
		    (ifr->ifr_flagshigh << 16);
		if (ifp->if_flags & IFF_SMART) {
			/* Smart drivers twiddle their own routes */
		} else if (ifp->if_flags & IFF_UP &&
		    (new_flags & IFF_UP) == 0) {
			if_down(ifp);
		} else if (new_flags & IFF_UP &&
		    (ifp->if_flags & IFF_UP) == 0) {
			do_ifup = 1;
		}

#ifdef IFPOLL_ENABLE
		if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
			if (new_flags & IFF_NPOLLING)
				ifpoll_register(ifp);
			else
				ifpoll_deregister(ifp);
		}
#endif

		ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
			(new_flags &~ IFF_CANTCHANGE);
		if (new_flags & IFF_PPROMISC) {
			/* Permanently promiscuous mode requested */
			ifp->if_flags |= IFF_PROMISC;
		} else if (ifp->if_pcount == 0) {
			ifp->if_flags &= ~IFF_PROMISC;
		}
		if (ifp->if_ioctl) {
			ifnet_serialize_all(ifp);
			ifp->if_ioctl(ifp, cmd, data, cred);
			ifnet_deserialize_all(ifp);
		}
		if (do_ifup)
			if_up(ifp);
		getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFCAP:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifr->ifr_reqcap & ~ifp->if_capabilities) {
			error = EINVAL;
			break;
		}
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;
	case SIOCSIFNAME:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
		if (error)
			break;
		if (new_name[0] == '\0') {
			error = EINVAL;
			break;
		}
		if (ifunit(new_name) != NULL) {
			error = EEXIST;
			break;
		}

		EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);

		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		namelen = strlen(new_name);
		onamelen = sdl->sdl_nlen;
		/*
		 * Move the address if needed.  This is safe because we
		 * allocate space for a name of length IFNAMSIZ when we
		 * create this in if_attach().
		 */
		if (namelen != onamelen) {
			bcopy(sdl->sdl_data + onamelen,
			      sdl->sdl_data + namelen, sdl->sdl_alen);
		}
		bcopy(new_name, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
		bzero(sdl->sdl_data, onamelen);
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;

		EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);

		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
		break;
	case SIOCSIFMETRIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		ifp->if_metric = ifr->ifr_metric;
		getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFPHYS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFMTU:
	{
		u_long oldmtu = ifp->if_mtu;

		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) {
			error = EINVAL;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		/*
		 * If the link MTU changed, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		break;
	}
	case SIOCSIFTSOLEN:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;

		/* XXX need driver supplied upper limit */
		if (ifr->ifr_tsolen <= 0) {
			error = EINVAL;
			break;
		}
		ifp->if_tsolen = ifr->ifr_tsolen;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;

		/* Don't allow group membership on non-multicast interfaces. */
		if ((ifp->if_flags & IFF_MULTICAST) == 0) {
			error = EOPNOTSUPP;
			break;
		}

		/* Don't let users screw up protocols' entries. */
		if (ifr->ifr_addr.sa_family != AF_LINK) {
			error = EINVAL;
			break;
		}

		if (cmd == SIOCADDMULTI) {
			struct ifmultiaddr *ifma;

			error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
		} else {
			error = if_delmulti(ifp, &ifr->ifr_addr);
		}
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFPHYADDR:
	case SIOCDIFPHYADDR:
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
#endif
	case SIOCSLIFPHYADDR:
	case SIOCSIFMEDIA:
	case SIOCSIFGENERIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		ifs->ascii[0] = '\0';
		/* FALLTHROUGH */
	case SIOCGIFPSRCADDR:
	case SIOCGIFPDSTADDR:
	case SIOCGLIFPHYADDR:
	case SIOCGIFMEDIA:
	case SIOCGIFGENERIC:
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;
	case SIOCSIFLLADDR:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		error = if_setlladdr(ifp, ifr->ifr_addr.sa_data,
				     ifr->ifr_addr.sa_len);
		EVENTHANDLER_INVOKE(iflladdr_event, ifp);
		break;
	default:
		oif_flags = ifp->if_flags;
		if (so->so_proto == 0) {
			error = EOPNOTSUPP;
			break;
		}
		error = so_pru_control_direct(so, cmd, data, ifp);

		if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
#ifdef INET6
			DELAY(100);	/* XXX: temporary workaround for fxp issue */
			if (ifp->if_flags & IFF_UP) {
				crit_enter();
				in6_if_up(ifp);
				crit_exit();
			}
#endif
		}
		break;
	}

	ifnet_unlock();
	return (error);
}
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(struct ifnet *ifp, int pswitch)
{
	struct ifreq ifr;
	int error;
	int oldflags;

	oldflags = ifp->if_flags;
	if (ifp->if_flags & IFF_PPROMISC) {
		/* Do nothing if device is in permanently promiscuous mode */
		ifp->if_pcount += pswitch ? 1 : -1;
		return (0);
	}
	if (pswitch) {
		/*
		 * If the device is not configured up, we cannot put it in
		 * promiscuous mode.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode enabled\n",
		    ifp->if_xname);
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode disabled\n",
		    ifp->if_xname);
	}
	ifr.ifr_flags = ifp->if_flags;
	ifr.ifr_flagshigh = ifp->if_flags >> 16;
	ifnet_serialize_all(ifp);
	error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
	ifnet_deserialize_all(ifp);
	if (error == 0)
		rt_ifmsg(ifp);
	else
		ifp->if_flags = oldflags;
	return (error);
}
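
/*
 * Reference-count sketch: if two consumers (e.g. two bpf(4) listeners)
 * each call ifpromisc(ifp, 1), only the first call pushes SIOCSIFFLAGS
 * down to the driver.  Each matching ifpromisc(ifp, 0) decrements
 * if_pcount, and only the final one clears IFF_PROMISC and notifies
 * the driver again.
 */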
/*
 * Return interface configuration
 * of system.  List may be used
 * in later ioctl's (above) to get
 * other information.
 */
static int
ifconf(u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifconf *ifc = (struct ifconf *)data;
	struct ifnet *ifp;
	struct sockaddr *sa;
	struct ifreq ifr, *ifrp;
	int space = ifc->ifc_len, error = 0;

	ifrp = ifc->ifc_req;

	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		struct ifaddr_container *ifac, *ifac_mark;
		struct ifaddr_marker mark;
		struct ifaddrhead *head;
		int addrs;

		if (space <= sizeof ifr)
			break;

		/*
		 * Zero the stack declared structure first to prevent
		 * memory disclosure.
		 */
		bzero(&ifr, sizeof(ifr));
		if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
		    >= sizeof(ifr.ifr_name)) {
			error = ENAMETOOLONG;
			break;
		}
		/*
		 * Add a marker, since copyout() could block and during that
		 * period the list could be changed.  Inserting the marker at
		 * the head of the list will not cause trouble for the code
		 * assuming that the first element of the list is AF_LINK; the
		 * marker will be moved to the next position w/o blocking.
		 */
		ifa_marker_init(&mark, ifp);
		ifac_mark = &mark.ifac;
		head = &ifp->if_addrheads[mycpuid];

		addrs = 0;
		TAILQ_INSERT_HEAD(head, ifac_mark, ifa_link);
		while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) {
			struct ifaddr *ifa = ifac->ifa;

			TAILQ_REMOVE(head, ifac_mark, ifa_link);
			TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link);

			/* Ignore marker */
			if (ifa->ifa_addr->sa_family == AF_UNSPEC)
				continue;

			if (space <= sizeof ifr)
				break;
			sa = ifa->ifa_addr;
			if (cred->cr_prison && prison_if(cred, sa))
				continue;
			addrs++;
			/*
			 * Keep a reference on this ifaddr, so that it will
			 * not be destroyed when its address is copied to
			 * the userland, which could block.
			 */
			IFAREF(ifa);
			if (sa->sa_len <= sizeof(*sa)) {
				ifr.ifr_addr = *sa;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else {
				if (space < (sizeof ifr) + sa->sa_len -
					    sizeof(*sa)) {
					IFAFREE(ifa);
					break;
				}
				space -= sa->sa_len - sizeof(*sa);
				error = copyout(&ifr, ifrp,
						sizeof ifr.ifr_name);
				if (error == 0) {
					error = copyout(sa, &ifrp->ifr_addr,
							sa->sa_len);
				}
				ifrp = (struct ifreq *)
					(sa->sa_len + (caddr_t)&ifrp->ifr_addr);
			}
			IFAFREE(ifa);
			if (error)
				break;
			space -= sizeof ifr;
		}
		TAILQ_REMOVE(head, ifac_mark, ifa_link);
.ifr_addr
, sizeof ifr
.ifr_addr
);
2327 error
= copyout(&ifr
, ifrp
, sizeof ifr
);
2330 space
-= sizeof ifr
;
2336 ifc
->ifc_len
-= space
;
/*
 * Just like if_promisc(), but for all-multicast-reception mode.
 */
int
if_allmulti(struct ifnet *ifp, int onswitch)
{
	int error = 0;
	struct ifreq ifr;

	crit_enter();

	if (onswitch) {
		if (ifp->if_amcount++ == 0) {
			ifp->if_flags |= IFF_ALLMULTI;
			ifr.ifr_flags = ifp->if_flags;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
					      NULL);
			ifnet_deserialize_all(ifp);
		}
	} else {
		if (ifp->if_amcount > 1) {
			ifp->if_amcount--;
		} else {
			ifp->if_amcount = 0;
			ifp->if_flags &= ~IFF_ALLMULTI;
			ifr.ifr_flags = ifp->if_flags;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
					      NULL);
			ifnet_deserialize_all(ifp);
		}
	}

	crit_exit();

	if (error == 0)
		rt_ifmsg(ifp);
	return (error);
}
/*
 * Add a multicast listenership to the interface in question.
 * The link layer provides a routine which converts a network
 * address into a link layer address.
 */
static int
if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa,
		       struct ifmultiaddr **retifma)
{
	struct sockaddr *llsa, *dupsa;
	int error;
	struct ifmultiaddr *ifma;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If the matching multicast address already exists
	 * then don't add a new one, just add a reference
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (sa_equal(sa, ifma->ifma_addr)) {
			ifma->ifma_refcount++;
			if (retifma)
				*retifma = ifma;
			return (0);
		}
	}

	/*
	 * Give the link layer a chance to accept/reject it, and also
	 * find out which AF_LINK address this maps to, if it isn't one
	 * already.
	 */
	if (ifp->if_resolvemulti) {
		error = ifp->if_resolvemulti(ifp, &llsa, sa);
		if (error)
			return (error);
	} else {
		llsa = NULL;
	}

	ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT);
	dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_INTWAIT);
	bcopy(sa, dupsa, sa->sa_len);

	ifma->ifma_addr = dupsa;
	ifma->ifma_lladdr = llsa;
	ifma->ifma_ifp = ifp;
	ifma->ifma_refcount = 1;
	ifma->ifma_protospec = NULL;
	rt_newmaddrmsg(RTM_NEWMADDR, ifma);

	TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
	if (retifma)
		*retifma = ifma;

	if (llsa != NULL) {
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (sa_equal(ifma->ifma_addr, llsa))
				break;
		}
		if (ifma) {
			ifma->ifma_refcount++;
		} else {
			ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT);
			dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_INTWAIT);
			bcopy(llsa, dupsa, llsa->sa_len);
			ifma->ifma_addr = dupsa;
			ifma->ifma_ifp = ifp;
			ifma->ifma_refcount = 1;
			TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma,
					  ifma_link);
		}
	}
	/*
	 * We are certain we have added something, so call down to the
	 * interface to let them know about it.
	 */
	if (ifp->if_ioctl)
		ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);

	return (0);
}
int
if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
	    struct ifmultiaddr **retifma)
{
	int error;

	ifnet_serialize_all(ifp);
	error = if_addmulti_serialized(ifp, sa, retifma);
	ifnet_deserialize_all(ifp);

	return (error);
}
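
/*
 * Usage sketch: a protocol joining an IPv4 multicast group wraps the
 * group in a sockaddr and lets the link layer derive the AF_LINK
 * address via if_resolvemulti (for Ethernet: 01:00:5e:xx:xx:xx):
 *
 *	struct sockaddr_in sin;
 *	struct ifmultiaddr *ifma;
 *
 *	bzero(&sin, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
 *	error = if_addmulti(ifp, (struct sockaddr *)&sin, &ifma);
 */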
/*
 * Remove a reference to a multicast address on this interface.  Yell
 * if the request does not match an existing membership.
 */
static int
if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return (ENOENT);

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return (0);
	}

	rt_newmaddrmsg(RTM_DELMADDR, ifma);
	sa = ifma->ifma_lladdr;
	TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
	/*
	 * Make sure the interface driver is notified
	 * in the case of a link layer mcast group being left.
	 */
	if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL)
		ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(ifma, M_IFMADDR);
	if (sa == NULL)
		return (0);

	/*
	 * Now look for the link-layer address which corresponds to
	 * this network address.  It had been squirreled away in
	 * ifma->ifma_lladdr for this purpose (so we don't have
	 * to call ifp->if_resolvemulti() again), and we saved that
	 * value in sa above.  If some nasty deleted the
	 * link-layer address out from underneath us, we can deal because
	 * the address we stored was not the same as the one which was
	 * in the record for the link-layer address.  (So we don't complain
	 * in that case.)
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return (0);

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return (0);
	}

	TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
	ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(sa, M_IFMADDR);
	kfree(ifma, M_IFMADDR);

	return (0);
}
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
{
        ifnet_serialize_all(ifp);
        error = if_delmulti_serialized(ifp, sa);
        ifnet_deserialize_all(ifp);
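
/*
 * Illustrative sketch only (not part of this file): how a hypothetical
 * caller would typically use the if_addmulti()/if_delmulti() wrappers
 * above.  The wrappers take and release the ifnet serializer themselves,
 * so the caller only needs a sockaddr describing the group; "sin" below
 * is an assumed, made-up local.
 *
 *      struct sockaddr_in sin;
 *      struct ifmultiaddr *ifma;
 *      int error;
 *
 *      bzero(&sin, sizeof(sin));
 *      sin.sin_len = sizeof(sin);
 *      sin.sin_family = AF_INET;
 *      sin.sin_addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
 *
 *      error = if_addmulti(ifp, (struct sockaddr *)&sin, &ifma);
 *      ...
 *      error = if_delmulti(ifp, (struct sockaddr *)&sin);
 */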
/*
 * Delete all multicast group membership for an interface.
 * Should be used to quickly flush all multicast filters.
 */
if_delallmulti_serialized(struct ifnet *ifp)
{
        struct ifmultiaddr *ifma, mark;
        struct sockaddr sa;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        bzero(&sa, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        sa.sa_len = sizeof(sa);

        bzero(&mark, sizeof(mark));
        mark.ifma_addr = &sa;

        TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link);
        while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) {
                TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link);
                TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark,
                    ifma_link);

                if (ifma->ifma_addr->sa_family == AF_UNSPEC)
                        continue;

                if_delmulti_serialized(ifp, ifma->ifma_addr);
        }
        TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link);
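
/*
 * Illustrative sketch only: a hypothetical detach/stop path that wants to
 * flush every multicast filter at once would call the function above while
 * already holding the ifnet serializer, e.g.:
 *
 *      ifnet_serialize_all(ifp);
 *      if_delallmulti_serialized(ifp);
 *      ifnet_deserialize_all(ifp);
 */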
/*
 * Set the link layer address on an interface.
 *
 * At this time we only support certain types of interfaces,
 * and we don't allow the length of the address to change.
 */
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
        struct sockaddr_dl *sdl;

        sdl = IF_LLSOCKADDR(ifp);

        if (len != sdl->sdl_alen)       /* don't allow length to change */

        switch (ifp->if_type) {
        case IFT_ETHER:                 /* these types use struct arpcom */
        case IFT_IEEE8023ADLAG:
                bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
                bcopy(lladdr, LLADDR(sdl), len);

        /*
         * If the interface is already up, we need
         * to re-init it in order to reprogram its
         */
        ifnet_serialize_all(ifp);
        if ((ifp->if_flags & IFF_UP) != 0) {
                struct ifaddr_container *ifac;
                struct ifreq ifr;

                ifp->if_flags &= ~IFF_UP;
                ifr.ifr_flags = ifp->if_flags;
                ifr.ifr_flagshigh = ifp->if_flags >> 16;
                ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);

                ifp->if_flags |= IFF_UP;
                ifr.ifr_flags = ifp->if_flags;
                ifr.ifr_flagshigh = ifp->if_flags >> 16;
                ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);

                /*
                 * Also send gratuitous ARPs to notify other nodes about
                 * the address change.
                 */
                TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
                        struct ifaddr *ifa = ifac->ifa;

                        if (ifa->ifa_addr != NULL &&
                            ifa->ifa_addr->sa_family == AF_INET)
                                arp_gratuitous(ifp, ifa);
                }
        }
        ifnet_deserialize_all(ifp);
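
/*
 * Illustrative sketch only: a hypothetical SIOCSIFLLADDR-style handler
 * would validate the new address and hand it to if_setlladdr(), which
 * refuses a length change and re-inits a running interface:
 *
 *      static const u_char new_mac[ETHER_ADDR_LEN] =
 *          { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };     locally administered
 *      int error;
 *
 *      error = if_setlladdr(ifp, new_mac, ETHER_ADDR_LEN);
 */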
struct ifmultiaddr *
ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
{
        struct ifmultiaddr *ifma;

        /* TODO: need ifnet_serialize_main */
        ifnet_serialize_all(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
                if (sa_equal(ifma->ifma_addr, sa))
                        break;
        ifnet_deserialize_all(ifp);
/*
 * This function locates the first real ethernet MAC from a network
 * card and loads it into node, returning 0 on success or ENOENT if
 * no suitable interfaces were found.  It is used by the uuid code to
 * generate a unique 6-byte number.
 */
if_getanyethermac(uint16_t *node, int minlen)
{
        struct ifnet *ifp;
        struct sockaddr_dl *sdl;

        TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
                if (ifp->if_type != IFT_ETHER)
                        continue;
                sdl = IF_LLSOCKADDR(ifp);
                if (sdl->sdl_alen < minlen)
                        continue;
                bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
                    minlen);
        }
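
/*
 * Illustrative sketch only: the uuid code mentioned above would use this
 * roughly as follows, falling back to a random node id when no suitable
 * ethernet interface is present:
 *
 *      uint16_t node[3];       6 bytes, the IEEE 802 node field
 *
 *      if (if_getanyethermac(node, sizeof(node)) != 0)
 *              ;               no ethernet NIC: caller picks a random node id
 */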
/*
 * The name argument must be a pointer to storage which will last as
 * long as the interface does.  For physical devices, the result of
 * device_get_name(dev) is a good choice and for pseudo-devices a
 * static string works well.
 */
if_initname(struct ifnet *ifp, const char *name, int unit)
{
        ifp->if_dname = name;
        ifp->if_dunit = unit;
        if (unit != IF_DUNIT_NONE)
                ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
        else
                strlcpy(ifp->if_xname, name, IFNAMSIZ);
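
/*
 * Illustrative sketch only: a typical attach routine for a physical
 * device would do ("dev" and "sc" are a hypothetical driver's own):
 *
 *      struct ifnet *ifp = &sc->arpcom.ac_if;
 *
 *      if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 *
 * while a pseudo-device clone would pass a static string and its unit,
 * e.g. if_initname(ifp, "lo", unit).
 */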
if_printf(struct ifnet *ifp, const char *fmt, ...)
{
        __va_list ap;
        int retval;

        retval = kprintf("%s: ", ifp->if_xname);
        __va_start(ap, fmt);
        retval += kvprintf(fmt, ap);
if_alloc(uint8_t type)
{
        struct ifnet *ifp;
        size_t size;

        /*
         * XXX temporary hack until arpcom is setup in if_l2com
         */
        if (type == IFT_ETHER)
                size = sizeof(struct arpcom);
        else
                size = sizeof(struct ifnet);

        ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO);

        ifp->if_type = type;

        if (if_com_alloc[type] != NULL) {
                ifp->if_l2com = if_com_alloc[type](type, ifp);
                if (ifp->if_l2com == NULL) {
                        kfree(ifp, M_IFNET);
if_free(struct ifnet *ifp)
{
        kfree(ifp, M_IFNET);
ifq_set_classic(struct ifaltq *ifq)
{
        ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq,
            ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request);
ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq,
    ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request)
{
        int q;

        KASSERT(mapsubq != NULL, ("mapsubq is not specified"));
        KASSERT(enqueue != NULL, ("enqueue is not specified"));
        KASSERT(dequeue != NULL, ("dequeue is not specified"));
        KASSERT(request != NULL, ("request is not specified"));

        ifq->altq_mapsubq = mapsubq;
        for (q = 0; q < ifq->altq_subq_cnt; ++q) {
                struct ifaltq_subque *ifsq = &ifq->altq_subq[q];

                ifsq->ifsq_enqueue = enqueue;
                ifsq->ifsq_dequeue = dequeue;
                ifsq->ifsq_request = request;
        }
ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m)
{
        classq_add(&ifsq->ifsq_norm, m);
        ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len);
ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m)
{
        classq_add(&ifsq->ifsq_prio, m);
        ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len);
        ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len);
static struct mbuf *
ifsq_norm_dequeue(struct ifaltq_subque *ifsq)
{
        struct mbuf *m;

        m = classq_get(&ifsq->ifsq_norm);
        if (m != NULL)
                ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len);
static struct mbuf *
ifsq_prio_dequeue(struct ifaltq_subque *ifsq)
{
        struct mbuf *m;

        m = classq_get(&ifsq->ifsq_prio);
        if (m != NULL) {
                ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len);
                ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len);
        }
ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pa __unused)
{
        if (ifsq->ifsq_len >= ifsq->ifsq_maxlen ||
            ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) {
                struct mbuf *m_drop;

                if (m->m_flags & M_PRIO) {
                        if (ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen >> 1) &&
                            ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt >> 1)) {
                                /* Try dropping some from normal queue. */
                                m_drop = ifsq_norm_dequeue(ifsq);
                        } else {
                                m_drop = ifsq_prio_dequeue(ifsq);
                        }
                } else {
                        m_drop = ifsq_norm_dequeue(ifsq);
                }

                if (m_drop != NULL) {
                        IFNET_STAT_INC(ifsq->ifsq_ifp, oqdrops, 1);
                }
                /*
                 * No old packets could be dropped!
                 * NOTE: Caller increases oqdrops.
                 */
        }

        if (m->m_flags & M_PRIO)
                ifsq_prio_enqueue(ifsq, m);
        else
                ifsq_norm_enqueue(ifsq, m);
ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op)
{
        struct mbuf *m;

        m = classq_head(&ifsq->ifsq_prio);
        if (m == NULL)
                m = classq_head(&ifsq->ifsq_norm);

        m = ifsq_prio_dequeue(ifsq);
        if (m == NULL)
                m = ifsq_norm_dequeue(ifsq);

        panic("unsupported ALTQ dequeue op: %d", op);
ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
        m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE);

        panic("unsupported ALTQ request: %d", req);
ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched)
{
        struct ifnet *ifp = ifsq_get_ifp(ifsq);
        int running = 0, need_sched;

        /*
         * Try to do direct ifnet.if_start on the subqueue first.  If there
         * is contention on the subqueue hardware serializer, ifnet.if_start
         * on the subqueue will be scheduled on the subqueue owner CPU.
         */
        if (!ifsq_tryserialize_hw(ifsq)) {
                /*
                 * Subqueue hardware serializer contention happened,
                 * ifnet.if_start on the subqueue is scheduled on
                 * the subqueue owner CPU, and we keep going.
                 */
                ifsq_ifstart_schedule(ifsq, 1);

        if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
                ifp->if_start(ifp, ifsq);
                if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
                        running = 1;
        }
        need_sched = ifsq_ifstart_need_schedule(ifsq, running);

        ifsq_deserialize_hw(ifsq);

        if (need_sched) {
                /*
                 * More data needs to be transmitted, ifnet.if_start on the
                 * subqueue is scheduled on the subqueue owner CPU, and we
                 * keep going.
                 * NOTE: ifnet.if_start subqueue interlock is not released.
                 */
                ifsq_ifstart_schedule(ifsq, force_sched);
        }
/*
 * Subqueue packet staging mechanism:
 *
 * Packets enqueued into a subqueue are staged until a certain amount has
 * accumulated before ifnet.if_start is called on the subqueue.  In this
 * way the driver can avoid writing to hardware registers upon every
 * packet; instead, hardware registers are written once a certain number
 * of packets have been put onto the hardware TX ring.  Measurement on
 * several modern NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) shows that
 * this hardware register write aggregation could save ~20% CPU time when
 * 18-byte UDP datagrams are transmitted at 1.48Mpps.  The performance
 * improvement from hardware register write aggregation is also mentioned
 * by Luigi Rizzo's netmap paper (http://info.iet.unipi.it/~luigi/netmap/).
 *
 * Subqueue packet staging is performed for two entry points into drivers'
 * transmission function:
 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try()
 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule()
 *
 * Subqueue packet staging will be stopped upon any of the following
 * conditions:
 * - If the count of packets enqueued on the current CPU is greater than or
 *   equal to ifsq_stage_cntmax. (XXX this should be per-interface)
 * - If the total length of packets enqueued on the current CPU is greater
 *   than or equal to the hardware's MTU - max_protohdr.  max_protohdr is
 *   cut from the hardware's MTU mainly because a full TCP segment's size
 *   is usually less than the hardware's MTU.
 * - ifsq_ifstart_schedule() is not pending on the current CPU and
 *   ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not
 *   released.
 * - The if_start_rollup(), which is registered as a low priority netisr
 *   rollup function, is called; probably because no more work is pending
 *   for netisr.
 *
 * Currently subqueue packet staging is only performed in netisr threads.
 */
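
/*
 * Illustrative sketch only: the two "amount" conditions above are the ones
 * tested in ifq_dispatch() below.  Staging continues only while both limits
 * are respected for the current CPU's stage:
 *
 *      if (stage->stg_cnt < ifsq_stage_cntmax &&
 *          stage->stg_len < ifp->if_mtu - max_protohdr)
 *              avoid_start = 1;        keep staging
 *      else
 *              avoid_start = 0;        flush to the driver now
 */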
ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
{
        struct ifaltq *ifq = &ifp->if_snd;
        struct ifaltq_subque *ifsq;
        int error, start = 0, len, mcast = 0, avoid_start = 0;
        struct ifsubq_stage_head *head = NULL;
        struct ifsubq_stage *stage = NULL;
        struct globaldata *gd = mycpu;
        struct thread *td = gd->gd_curthread;

        crit_enter_quick(td);

        ifsq = ifq_map_subq(ifq, gd->gd_cpuid);
        ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq);

        len = m->m_pkthdr.len;
        if (m->m_flags & M_MCAST)
                mcast = 1;

        if (td->td_type == TD_TYPE_NETISR) {
                head = &ifsubq_stage_heads[mycpuid];
                stage = ifsq_get_stage(ifsq, mycpuid);

                stage->stg_cnt++;
                stage->stg_len += len;
                if (stage->stg_cnt < ifsq_stage_cntmax &&
                    stage->stg_len < (ifp->if_mtu - max_protohdr))
                        avoid_start = 1;
        }

        error = ifsq_enqueue_locked(ifsq, m, pa);
        if (error) {
                IFNET_STAT_INC(ifp, oqdrops, 1);
                if (!ifsq_data_ready(ifsq)) {
                        ALTQ_SQ_UNLOCK(ifsq);
                        crit_exit_quick(td);
                        return;
                }
        }

        if (!ifsq_is_started(ifsq)) {
                if (avoid_start) {
                        ALTQ_SQ_UNLOCK(ifsq);

                        if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
                                ifsq_stage_insert(head, stage);

                        IFNET_STAT_INC(ifp, obytes, len);
                        if (mcast)
                                IFNET_STAT_INC(ifp, omcasts, 1);
                        crit_exit_quick(td);
                        return;
                }

                /*
                 * Hold the subqueue interlock of ifnet.if_start
                 */
                ifsq_set_started(ifsq);
                start = 1;
        }

        ALTQ_SQ_UNLOCK(ifsq);

        if (!error) {
                IFNET_STAT_INC(ifp, obytes, len);
                if (mcast)
                        IFNET_STAT_INC(ifp, omcasts, 1);
        }

        if (stage != NULL) {
                if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) {
                        KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
                        ifsq_stage_remove(head, stage);
                        ifsq_ifstart_schedule(ifsq, 1);
                        crit_exit_quick(td);
                        return;
                }

                if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) {
                        ifsq_stage_remove(head, stage);
                }
        }

        if (!start) {
                crit_exit_quick(td);
                return;
        }

        ifsq_ifstart_try(ifsq, 0);

        crit_exit_quick(td);
ifa_create(int size)
{
        struct ifaddr *ifa;
        int i;

        KASSERT(size >= sizeof(*ifa), ("ifaddr size too small"));

        ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO);
        ifa->ifa_containers =
            kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container),
                M_IFADDR, M_INTWAIT | M_ZERO);

        ifa->ifa_ncnt = ncpus;
        for (i = 0; i < ncpus; ++i) {
                struct ifaddr_container *ifac = &ifa->ifa_containers[i];

                ifac->ifa_magic = IFA_CONTAINER_MAGIC;
                ifac->ifa_refcnt = 1;
        }

#ifdef IFADDR_DEBUG
        kprintf("alloc ifa %p %d\n", ifa, size);
#endif
ifac_free(struct ifaddr_container *ifac, int cpu_id)
{
        struct ifaddr *ifa = ifac->ifa;

        KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
        KKASSERT(ifac->ifa_refcnt == 0);
        KASSERT(ifac->ifa_listmask == 0,
            ("ifa is still on %#x lists", ifac->ifa_listmask));

        ifac->ifa_magic = IFA_CONTAINER_DEAD;

#ifdef IFADDR_DEBUG_VERBOSE
        kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
#endif

        KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
            ("invalid # of ifac, %d", ifa->ifa_ncnt));
        if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
#ifdef IFADDR_DEBUG
                kprintf("free ifa %p\n", ifa);
#endif
                kfree(ifa->ifa_containers, M_IFADDR);
                kfree(ifa, M_IFADDR);
        }
ifa_iflink_dispatch(netmsg_t nmsg)
{
        struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
        struct ifaddr *ifa = msg->ifa;
        struct ifnet *ifp = msg->ifp;
        int cpu = mycpuid;
        struct ifaddr_container *ifac;

        ifac = &ifa->ifa_containers[cpu];
        ASSERT_IFAC_VALID(ifac);
        KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
            ("ifaddr is on if_addrheads"));

        ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
        if (msg->tail)
                TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
        else
                TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);

        netisr_forwardmsg(&nmsg->base, cpu + 1);
ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
{
        struct netmsg_ifaddr msg;

        netmsg_init(&msg.base, NULL, &curthread->td_msgport,
            0, ifa_iflink_dispatch);
        msg.ifa = ifa;
        msg.ifp = ifp;
        msg.tail = tail;

        netisr_domsg(&msg.base, 0);
ifa_ifunlink_dispatch(netmsg_t nmsg)
{
        struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
        struct ifaddr *ifa = msg->ifa;
        struct ifnet *ifp = msg->ifp;
        int cpu = mycpuid;
        struct ifaddr_container *ifac;

        ifac = &ifa->ifa_containers[cpu];
        ASSERT_IFAC_VALID(ifac);
        KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
            ("ifaddr is not on if_addrhead"));

        TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
        ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;

        netisr_forwardmsg(&nmsg->base, cpu + 1);
ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
{
        struct netmsg_ifaddr msg;

        netmsg_init(&msg.base, NULL, &curthread->td_msgport,
            0, ifa_ifunlink_dispatch);
        msg.ifa = ifa;
        msg.ifp = ifp;

        netisr_domsg(&msg.base, 0);
ifa_destroy_dispatch(netmsg_t nmsg)
{
        struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;

        netisr_forwardmsg(&nmsg->base, mycpuid + 1);
ifa_destroy(struct ifaddr *ifa)
{
        struct netmsg_ifaddr msg;

        netmsg_init(&msg.base, NULL, &curthread->td_msgport,
            0, ifa_destroy_dispatch);

        netisr_domsg(&msg.base, 0);
if_start_rollup(void)
{
        struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid];
        struct ifsubq_stage *stage;

        while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) {
                struct ifaltq_subque *ifsq = stage->stg_subq;

                if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)

                ifsq_stage_remove(head, stage);

                ifsq_ifstart_schedule(ifsq, 1);

                if (!ifsq_is_started(ifsq)) {
                        /*
                         * Hold the subqueue interlock of
                         * ifnet.if_start
                         */
                        ifsq_set_started(ifsq);

                ALTQ_SQ_UNLOCK(ifsq);

                ifsq_ifstart_try(ifsq, 1);

                KKASSERT((stage->stg_flags &
                    (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
        }
ifnetinit(void *dummy __unused)
{
        int i;

        for (i = 0; i < ncpus; ++i)
                TAILQ_INIT(&ifsubq_stage_heads[i].stg_head);

        netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART);
if_register_com_alloc(u_char type,
    if_com_alloc_t *a, if_com_free_t *f)
{
        KASSERT(if_com_alloc[type] == NULL,
            ("if_register_com_alloc: %d already registered", type));
        KASSERT(if_com_free[type] == NULL,
            ("if_register_com_alloc: %d free already registered", type));

        if_com_alloc[type] = a;
        if_com_free[type] = f;
if_deregister_com_alloc(u_char type)
{
        KASSERT(if_com_alloc[type] != NULL,
            ("if_deregister_com_alloc: %d not registered", type));
        KASSERT(if_com_free[type] != NULL,
            ("if_deregister_com_alloc: %d free not registered", type));
        if_com_alloc[type] = NULL;
        if_com_free[type] = NULL;
ifq_set_maxlen(struct ifaltq *ifq, int len)
{
        ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax);
ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused)
{
        return ALTQ_SUBQ_INDEX_DEFAULT;
ifq_mapsubq_modulo(struct ifaltq *ifq, int cpuid)
{
        return (cpuid % ifq->altq_subq_mappriv);
ifsq_watchdog(void *arg)
{
        struct ifsubq_watchdog *wd = arg;
        struct ifnet *ifp;

        if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer))
                goto done;

        ifp = ifsq_get_ifp(wd->wd_subq);
        if (ifnet_tryserialize_all(ifp)) {
                wd->wd_watchdog(wd->wd_subq);
                ifnet_deserialize_all(ifp);
        } else {
                /* try again next timeout */
                wd->wd_timer = 1;
        }
done:
        ifsq_watchdog_reset(wd);
ifsq_watchdog_reset(struct ifsubq_watchdog *wd)
{
        callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd,
            ifsq_get_cpuid(wd->wd_subq));
ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq,
    ifsq_watchdog_t watchdog)
{
        callout_init_mp(&wd->wd_callout);
        wd->wd_timer = 0;
        wd->wd_subq = ifsq;
        wd->wd_watchdog = watchdog;
ifsq_watchdog_start(struct ifsubq_watchdog *wd)
{
        ifsq_watchdog_reset(wd);
ifsq_watchdog_stop(struct ifsubq_watchdog *wd)
{
        callout_stop(&wd->wd_callout);
        KASSERT(curthread->td_type != TD_TYPE_NETISR,
            ("try holding ifnet lock in netisr"));
        mtx_lock(&ifnet_mtx);

        KASSERT(curthread->td_type != TD_TYPE_NETISR,
            ("try holding ifnet lock in netisr"));
        mtx_unlock(&ifnet_mtx);
static struct ifnet_array *
ifnet_array_alloc(int count)
{
        struct ifnet_array *arr;

        arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]),
            M_IFNET, M_WAITOK | M_ZERO);
        arr->ifnet_count = count;
ifnet_array_free(struct ifnet_array *arr)
{
        if (arr == &ifnet_array0)
                return;
        kfree(arr, M_IFNET);
static struct ifnet_array *
ifnet_array_add(struct ifnet *ifp, const struct ifnet_array *old_arr)
{
        struct ifnet_array *arr;
        int count, i;

        KASSERT(old_arr->ifnet_count >= 0,
            ("invalid ifnet array count %d", old_arr->ifnet_count));
        count = old_arr->ifnet_count + 1;
        arr = ifnet_array_alloc(count);

        /*
         * Save the old ifnet array and append this ifp to the end of
         * the new ifnet array.
         */
        for (i = 0; i < old_arr->ifnet_count; ++i) {
                KASSERT(old_arr->ifnet_arr[i] != ifp,
                    ("%s is already in ifnet array", ifp->if_xname));
                arr->ifnet_arr[i] = old_arr->ifnet_arr[i];
        }
        KASSERT(i == count - 1,
            ("add %s, ifnet array index mismatch, should be %d, but got %d",
             ifp->if_xname, count - 1, i));
        arr->ifnet_arr[i] = ifp;

        return arr;
static struct ifnet_array *
ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr)
{
        struct ifnet_array *arr;
        int count, i, idx, found = 0;

        KASSERT(old_arr->ifnet_count > 0,
            ("invalid ifnet array count %d", old_arr->ifnet_count));
        count = old_arr->ifnet_count - 1;
        arr = ifnet_array_alloc(count);

        /*
         * Save the old ifnet array, but skip this ifp.
         */
        idx = 0;
        for (i = 0; i < old_arr->ifnet_count; ++i) {
                if (old_arr->ifnet_arr[i] == ifp) {
                        KASSERT(!found,
                            ("dup %s is in ifnet array", ifp->if_xname));
                        found = 1;
                        continue;
                }
                KASSERT(idx < count,
                    ("invalid ifnet array index %d, count %d", idx, count));
                arr->ifnet_arr[idx] = old_arr->ifnet_arr[i];
                idx++;
        }
        KASSERT(found, ("%s is not in ifnet array", ifp->if_xname));
        KASSERT(idx == count,
            ("del %s, ifnet array count mismatch, should be %d, but got %d ",
             ifp->if_xname, count, idx));

        return arr;
const struct ifnet_array *
ifnet_array_get(void)
{
        const struct ifnet_array *ret;

        KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr"));
        ret = ifnet_array;
        /* Make sure 'ret' is really used. */
        cpu_ccfence();
        return ret;
ifnet_array_isempty(void)
{
        KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr"));
        if (ifnet_array->ifnet_count == 0)
                return 1;
        else
                return 0;
ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp)
{
        struct ifaddr *ifa;

        memset(mark, 0, sizeof(*mark));

        ifa = &mark->ifa;
        mark->ifac.ifa = ifa;

        ifa->ifa_addr = &mark->addr;
        ifa->ifa_dstaddr = &mark->dstaddr;
        ifa->ifa_netmask = &mark->netmask;
if_ringcnt_fixup(int ring_cnt, int ring_cntmax)
{
        KASSERT(ring_cntmax > 0, ("invalid ring count max %d", ring_cntmax));

        if (ring_cnt <= 0 || ring_cnt > ring_cntmax)
                ring_cnt = ring_cntmax;
        if (ring_cnt > netisr_ncpus)
                ring_cnt = netisr_ncpus;
        return ring_cnt;
if_ringmap_set_grid(device_t dev, struct if_ringmap *rm, int grid)
{
        int i, offset;

        KASSERT(grid > 0, ("invalid if_ringmap grid %d", grid));
        KASSERT(grid >= rm->rm_cnt, ("invalid if_ringmap grid %d, count %d",
            grid, rm->rm_cnt));
        rm->rm_grid = grid;

        offset = (rm->rm_grid * device_get_unit(dev)) % netisr_ncpus;
        for (i = 0; i < rm->rm_cnt; ++i) {
                rm->rm_cpumap[i] = offset + i;
                KASSERT(rm->rm_cpumap[i] < netisr_ncpus,
                    ("invalid cpumap[%d] = %d, offset %d", i,
                     rm->rm_cpumap[i], offset));
        }
static struct if_ringmap *
if_ringmap_alloc_flags(device_t dev, int ring_cnt, int ring_cntmax,
    uint32_t flags)
{
        struct if_ringmap *rm;
        int i, grid = 0, prev_grid;

        ring_cnt = if_ringcnt_fixup(ring_cnt, ring_cntmax);
        rm = kmalloc(__offsetof(struct if_ringmap, rm_cpumap[ring_cnt]),
            M_DEVBUF, M_WAITOK | M_ZERO);

        rm->rm_cnt = ring_cnt;
        if (flags & RINGMAP_FLAG_POWEROF2)
                rm->rm_cnt = 1 << (fls(rm->rm_cnt) - 1);

        prev_grid = netisr_ncpus;
        for (i = 0; i < netisr_ncpus; ++i) {
                if (netisr_ncpus % (i + 1) != 0)
                        continue;

                grid = netisr_ncpus / (i + 1);
                if (rm->rm_cnt > grid) {
                        grid = prev_grid;
                        break;
                }

                if (rm->rm_cnt > netisr_ncpus / (i + 2))
                        break;
                prev_grid = grid;
        }
        if_ringmap_set_grid(dev, rm, grid);

        return (rm);
if_ringmap_alloc(device_t dev, int ring_cnt, int ring_cntmax)
{
        return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax,
            RINGMAP_FLAG_NONE));
if_ringmap_alloc2(device_t dev, int ring_cnt, int ring_cntmax)
{
        return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax,
            RINGMAP_FLAG_POWEROF2));
if_ringmap_free(struct if_ringmap *rm)
{
        kfree(rm, M_DEVBUF);
/*
 * Align the two ringmaps.
 *
 * e.g. 8 netisrs, rm0 contains 4 rings, rm1 contains 2 rings.
 *
 * Before:
 *
 * CPU    0  1  2  3   4  5  6  7
 * NIC_RX n0 n1 n2 n3
 *
 * After:
 *
 * CPU    0  1  2  3   4  5  6  7
 * NIC_RX n0 n1 n2 n3
 */
if_ringmap_align(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1)
{
        if (rm0->rm_grid > rm1->rm_grid)
                if_ringmap_set_grid(dev, rm1, rm0->rm_grid);
        else if (rm0->rm_grid < rm1->rm_grid)
                if_ringmap_set_grid(dev, rm0, rm1->rm_grid);
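
/*
 * Illustrative sketch only: a hypothetical multi-ring NIC attach path
 * would allocate one ringmap per ring set and align them so RX and TX
 * rings of the same index land on the same CPUs ("sc", RX_RING_MAX and
 * TX_RING_MAX below are made-up names):
 *
 *      sc->rx_rmap = if_ringmap_alloc(dev, rx_rings, RX_RING_MAX);
 *      sc->tx_rmap = if_ringmap_alloc(dev, tx_rings, TX_RING_MAX);
 *      if_ringmap_align(dev, sc->rx_rmap, sc->tx_rmap);
 *      ...
 *      cpu = if_ringmap_cpumap(sc->rx_rmap, ring_idx);   per-ring CPU binding
 */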
if_ringmap_match(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1)
{
        int subset_grid, cnt, divisor, mod, offset, i;
        struct if_ringmap *subset_rm, *rm;
        int old_rm0_grid, old_rm1_grid;

        if (rm0->rm_grid == rm1->rm_grid)
                return;

        /* Save grid for later use */
        old_rm0_grid = rm0->rm_grid;
        old_rm1_grid = rm1->rm_grid;

        if_ringmap_align(dev, rm0, rm1);

        /*
         * Re-shuffle rings to get more even distribution.
         *
         * e.g. 12 netisrs, rm0 contains 4 rings, rm1 contains 2 rings.
         *
         * CPU      0  1  2  3  4  5  6  7  8  9 10 11
         *
         * NIC_RX  a0 a1 a2 a3 b0 b1 b2 b3 c0 c1 c2 c3
         * NIC_TX  A0 A1 B0 B1 C0 C1
         *
         * NIC_RX  d0 d1 d2 d3 e0 e1 e2 e3 f0 f1 f2 f3
         * NIC_TX  D0 D1 E0 E1 F0 F1
         */
        if (rm0->rm_cnt >= (2 * old_rm1_grid)) {
                cnt = rm0->rm_cnt;
                subset_grid = old_rm1_grid;
                subset_rm = rm1;
                rm = rm0;
        } else if (rm1->rm_cnt > (2 * old_rm0_grid)) {
                cnt = rm1->rm_cnt;
                subset_grid = old_rm0_grid;
                subset_rm = rm0;
                rm = rm1;
        } else {
                /* No space to shuffle. */
                return;
        }

        mod = cnt / subset_grid;
        divisor = netisr_ncpus / rm->rm_grid;
        offset = ((device_get_unit(dev) / divisor) % mod) * subset_grid;

        for (i = 0; i < subset_rm->rm_cnt; ++i) {
                subset_rm->rm_cpumap[i] += offset;
                KASSERT(subset_rm->rm_cpumap[i] < netisr_ncpus,
                    ("match: invalid cpumap[%d] = %d, offset %d",
                     i, subset_rm->rm_cpumap[i], offset));
        }

        for (i = 0; i < subset_rm->rm_cnt; ++i) {
                int j;

                for (j = 0; j < rm->rm_cnt; ++j) {
                        if (rm->rm_cpumap[j] == subset_rm->rm_cpumap[i])
                                break;
                }
                KASSERT(j < rm->rm_cnt,
                    ("subset cpumap[%d] = %d not found in superset",
                     i, subset_rm->rm_cpumap[i]));
        }
if_ringmap_count(const struct if_ringmap *rm)
{
        return (rm->rm_cnt);
if_ringmap_cpumap(const struct if_ringmap *rm, int ring)
{
        KASSERT(ring >= 0 && ring < rm->rm_cnt, ("invalid ring %d", ring));
        return (rm->rm_cpumap[ring]);
if_ringmap_rdrtable(const struct if_ringmap *rm, int table[], int table_nent)
{
        int i, grid_idx, grid_cnt, patch_off, patch_cnt, ncopy;

        KASSERT(table_nent > 0 && (table_nent & NETISR_CPUMASK) == 0,
            ("invalid redirect table entries %d", table_nent));

        grid_idx = 0;
        for (i = 0; i < NETISR_CPUMAX; ++i) {
                table[i] = grid_idx++ % rm->rm_cnt;

                if (grid_idx == rm->rm_grid)
                        grid_idx = 0;
        }

        /*
         * Make the ring distribution more even for the remainder
         *
         * e.g. 12 netisrs, rm contains 8 rings.
         *
         * Redirect table before:
         *
         *  0  1  2  3  4  5  6  7  0  1  2  3  0  1  2  3
         *  4  5  6  7  0  1  2  3  0  1  2  3  4  5  6  7
         *  0  1  2  3  0  1  2  3  4  5  6  7  0  1  2  3
         *
         * Redirect table after being patched (pX, patched entries):
         *
         *  0  1  2  3  4  5  6  7 p0 p1 p2 p3  0  1  2  3
         *  4  5  6  7 p4 p5 p6 p7  0  1  2  3  4  5  6  7
         * p0 p1 p2 p3  0  1  2  3  4  5  6  7 p4 p5 p6 p7
         */
        patch_cnt = rm->rm_grid % rm->rm_cnt;

        patch_off = rm->rm_grid - (rm->rm_grid % rm->rm_cnt);

        grid_cnt = roundup(NETISR_CPUMAX, rm->rm_grid) / rm->rm_grid;

        grid_idx = 0;
        for (i = 0; i < grid_cnt; ++i) {
                int j;

                for (j = 0; j < patch_cnt; ++j) {
                        int fix_idx;

                        fix_idx = (i * rm->rm_grid) + patch_off + j;
                        if (fix_idx >= NETISR_CPUMAX)
                                break;
                        table[fix_idx] = grid_idx++ % rm->rm_cnt;
                }
        }

        /*
         * If the device supports larger redirect table, duplicate
         * the first NETISR_CPUMAX entries to the rest of the table,
         * so that it matches upper layer's expectation:
         * (hash & NETISR_CPUMASK) % netisr_ncpus
         */
        ncopy = table_nent / NETISR_CPUMAX;
        for (i = 1; i < ncopy; ++i) {
                memcpy(&table[i * NETISR_CPUMAX], table,
                    NETISR_CPUMAX * sizeof(table[0]));
        }

        if (if_ringmap_dumprdr) {
                for (i = 0; i < table_nent; ++i) {
                        if (i != 0 && i % 16 == 0)
                                kprintf("\n");
                        kprintf("%03d ", table[i]);
                }
        }
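
/*
 * Illustrative sketch only: a hypothetical RSS-capable driver would fill
 * its hardware redirect table from the ringmap like this (the size 128 is
 * just an example; it must satisfy the KASSERT above, i.e. be a multiple
 * of NETISR_CPUMAX):
 *
 *      int rdr_table[128];
 *      int i, nent = 128;
 *
 *      if_ringmap_rdrtable(sc->rx_rmap, rdr_table, nent);
 *      for (i = 0; i < nent; ++i)
 *              ;       program rdr_table[i] into the NIC's RSS table
 */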
if_ringmap_cpumap_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct if_ringmap *rm = arg1;
        int i, error = 0;

        for (i = 0; i < rm->rm_cnt; ++i) {
                int cpu = rm->rm_cpumap[i];

                error = SYSCTL_OUT(req, &cpu, sizeof(cpu));