/*
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
 * $DragonFly: src/sys/net/if.c,v 1.84 2008/11/15 11:58:16 sephe Exp $
 */
#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>

#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <net/radix.h>
#include <net/route.h>
#include <net/if_clone.h>
#include <net/netisr.h>
#include <net/netmsg2.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>
#include <machine/smp.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#endif

#if defined(COMPAT_43)
#include <emulation/43bsd/43bsd_socket.h>
#endif /* COMPAT_43 */
struct netmsg_ifaddr {
	struct netmsg	netmsg;
	struct ifaddr	*ifa;
	struct ifnet	*ifp;
	int		tail;
};
/*
 * System initialization
 */
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t, struct ucred *);
static void	ifinit(void *);
static void	ifnetinit(void *);
static void	if_slowtimo(void *);
static void	link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
static int	if_rtdel(struct radix_node *, void *);

/*
 * XXX: declared here to avoid pulling in many inet6-related files;
 * this should probably be generalized.
 */
extern void	nd6_setmtu(struct ifnet *);

SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
/* Must be after netisr_init */
SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)

MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");

int	ifqmaxlen = IFQ_MAXLEN;
struct	ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
/* In ifq_dispatch(), try to do direct ifnet.if_start first */
static int ifq_dispatch_schedonly = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schedonly, CTLFLAG_RW,
	   &ifq_dispatch_schedonly, 0, "");

/* In ifq_dispatch(), schedule ifnet.if_start without checking ifnet.if_snd */
static int ifq_dispatch_schednochk = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schednochk, CTLFLAG_RW,
	   &ifq_dispatch_schednochk, 0, "");

/* In if_devstart(), try to do direct ifnet.if_start first */
static int if_devstart_schedonly = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schedonly, CTLFLAG_RW,
	   &if_devstart_schedonly, 0, "");

/* In if_devstart(), schedule ifnet.if_start without checking ifnet.if_snd */
static int if_devstart_schednochk = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schednochk, CTLFLAG_RW,
	   &if_devstart_schednochk, 0, "");

/* Schedule ifnet.if_start on the current CPU */
static int if_start_oncpu_sched = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_start_oncpu_sched, CTLFLAG_RW,
	   &if_start_oncpu_sched, 0, "");
struct callout		if_slowtimo_timer;

struct ifnet		**ifindex2ifnet = NULL;
static struct thread	ifnet_threads[MAXCPU];
static int		ifnet_mpsafe_thread = NETMSG_SERVICE_MPSAFE;

#define IFQ_KTR_STRING		"ifq=%p"
#define IFQ_KTR_ARG_SIZE	(sizeof(void *))
#define KTR_IFQ			KTR_ALL
KTR_INFO_MASTER(ifq);
KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
#define logifq(name, arg)	KTR_LOG(ifq_ ## name, arg)

#define IF_START_KTR_STRING	"ifp=%p"
#define IF_START_KTR_ARG_SIZE	(sizeof(void *))
#define KTR_IF_START		KTR_ALL
KTR_INFO_MASTER(if_start);
KTR_INFO(KTR_IF_START, if_start, run, 0,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, sched, 1,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, avoid, 2,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
#define logifstart(name, arg)	KTR_LOG(if_start_ ## name, arg)
/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as parameters.
 */
ifinit(void *dummy)
{
	struct ifnet *ifp;

	callout_init(&if_slowtimo_timer);

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_snd.ifq_maxlen == 0) {
			if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
			ifp->if_snd.ifq_maxlen = ifqmaxlen;
		}
	}
}
if_start_cpuid(struct ifnet *ifp)
{
	return ifp->if_cpuid;
}

#ifdef DEVICE_POLLING
if_start_cpuid_poll(struct ifnet *ifp)
{
	int poll_cpuid = ifp->if_poll_cpuid;

	if (poll_cpuid >= 0)
		return poll_cpuid;
	return ifp->if_cpuid;
}
#endif

if_start_ipifunc(void *arg)
{
	struct ifnet *ifp = arg;
	struct lwkt_msg *lmsg = &ifp->if_start_nmsg[mycpuid].nm_lmsg;

	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
}
/*
 * Schedule ifnet.if_start on ifnet's CPU
 */
if_start_schedule(struct ifnet *ifp)
{
	int cpu;

	if (if_start_oncpu_sched)
		cpu = mycpuid;
	else
		cpu = ifp->if_start_cpuid(ifp);

	if (cpu != mycpuid)
		lwkt_send_ipiq(globaldata_find(cpu), if_start_ipifunc, ifp);
	else
		if_start_ipifunc(ifp);
}

/*
 * This function releases the ifnet.if_start interlock
 * if ifnet.if_start does not need to be scheduled.
 */
if_start_need_schedule(struct ifaltq *ifq, int running)
{
	if (!running || ifq_is_empty(ifq)
	    || ifq->altq_tbr != NULL) {
		/*
		 * The ifnet.if_start interlock is released if:
		 * 1) Hardware can not take any packets, because
		 *    o  the interface is marked down, or
		 *    o  the hardware queue is full (IFF_OACTIVE).
		 *    In the second case, a hardware interrupt or
		 *    polling(4) will call/schedule ifnet.if_start
		 *    when the hardware queue is ready.
		 * 2) There is no packet in ifnet.if_snd.
		 *    A further ifq_dispatch or ifq_handoff will call/
		 *    schedule ifnet.if_start.
		 * 3) TBR is used and it does not allow further
		 *    dequeueing; the TBR callout will call ifnet.if_start.
		 */
		if (!running || !ifq_data_ready(ifq)) {
			ifq->altq_started = 0;
			return 0;
		}
	}
	return 1;
}
if_start_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct ifnet *ifp = lmsg->u.ms_resultp;
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0;

	lwkt_replymsg(lmsg, 0);	/* reply ASAP */

	if (!if_start_oncpu_sched && mycpuid != ifp->if_start_cpuid(ifp)) {
		/*
		 * If the ifnet is still up, we need to
		 * chase its CPU change.
		 */
		if (ifp->if_flags & IFF_UP) {
			logifstart(chase_sched, ifp);
			if_start_schedule(ifp);
		}
		return;
	}

	if (ifp->if_flags & IFF_UP) {
		ifnet_serialize_tx(ifp); /* XXX try? */
		if ((ifp->if_flags & IFF_OACTIVE) == 0) {
			logifstart(run, ifp);
			ifp->if_start(ifp);
			if ((ifp->if_flags &
			     (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
				running = 1;
		}
		ifnet_deserialize_tx(ifp);
	}

	if (if_start_need_schedule(ifq, running)) {
		if (lmsg->ms_flags & MSGF_DONE) {	/* XXX necessary? */
			logifstart(sched, ifp);
			lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
		}
	}
}
/* Device driver ifnet.if_start helper function */
if_devstart(struct ifnet *ifp)
{
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0;

	ASSERT_IFNET_SERIALIZED_TX(ifp);

	if (ifq->altq_started || !ifq_data_ready(ifq)) {
		logifstart(avoid, ifp);
		return;
	}
	ifq->altq_started = 1;

	if (if_devstart_schedonly) {
		/*
		 * Always schedule ifnet.if_start on ifnet's CPU,
		 * short circuiting the rest of this function.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
		return;
	}

	logifstart(run, ifp);
	ifp->if_start(ifp);

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
		running = 1;

	if (if_devstart_schednochk || if_start_need_schedule(ifq, running)) {
		/*
		 * More data needs to be transmitted; ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
	}
}
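/*
 * Hypothetical driver-side sketch (not part of this file): a driver "foo"
 * would typically call if_devstart() from its TX-completion path, after
 * reclaiming descriptors and clearing IFF_OACTIVE, while holding the TX
 * serializer that ASSERT_IFNET_SERIALIZED_TX() above checks for:
 *
 *	static void
 *	foo_txeof(struct foo_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->arpcom.ac_if;
 *
 *		... reclaim completed TX descriptors ...
 *		ifp->if_flags &= ~IFF_OACTIVE;
 *		if (!ifq_is_empty(&ifp->if_snd))
 *			if_devstart(ifp);
 *	}
 */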
if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_enter(ifp->if_serializer);
}

if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_exit(ifp->if_serializer);
}

if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	return lwkt_serialize_try(ifp->if_serializer);
}

if_default_serialize_assert(struct ifnet *ifp,
			    enum ifnet_serialize slz __unused,
			    boolean_t serialized)
{
	if (serialized)
		ASSERT_SERIALIZED(ifp->if_serializer);
	else
		ASSERT_NOT_SERIALIZED(ifp->if_serializer);
}
/*
 * Attach an interface to the list of "active" interfaces.
 *
 * The serializer is optional.  If non-NULL, access to the interface
 * is serialized via it.
 */
if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
{
	unsigned socksize, ifasize;
	int namelen, masklen;
	struct sockaddr_dl *sdl;
	struct ifaddr *ifa;
	struct ifaltq *ifq;
	int i;
	static int if_indexlim = 8;

	if (ifp->if_serialize != NULL) {
		KASSERT(ifp->if_deserialize != NULL &&
			ifp->if_tryserialize != NULL &&
			ifp->if_serialize_assert != NULL,
			("serialize functions are partially setup\n"));

		/*
		 * If the device supplies serialize functions,
		 * then clear if_serializer to catch any invalid
		 * usage of this field.
		 */
		KASSERT(serializer == NULL,
			("both serialize functions and default serializer "
			 "are supplied\n"));
		ifp->if_serializer = NULL;
	} else {
		KASSERT(ifp->if_deserialize == NULL &&
			ifp->if_tryserialize == NULL &&
			ifp->if_serialize_assert == NULL,
			("serialize functions are partially setup\n"));
		ifp->if_serialize = if_default_serialize;
		ifp->if_deserialize = if_default_deserialize;
		ifp->if_tryserialize = if_default_tryserialize;
		ifp->if_serialize_assert = if_default_serialize_assert;

		/*
		 * The serializer can be passed in from the device,
		 * allowing the same serializer to be used for both
		 * the interrupt interlock and the device queue.
		 * If not specified, the netif structure will use an
		 * embedded serializer.
		 */
		if (serializer == NULL) {
			serializer = &ifp->if_default_serializer;
			lwkt_serialize_init(serializer);
		}
		ifp->if_serializer = serializer;
	}
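/*
 * Illustration (hypothetical call sites, not part of this file): a driver
 * that wants one lock shared between its interrupt handler and the stack
 * passes its own serializer, otherwise NULL selects the embedded default:
 *
 *	if_attach(ifp, &sc->foo_serializer);	(shared with the ISR)
 *	if_attach(ifp, NULL);			(use if_default_serializer)
 */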
	ifp->if_start_cpuid = if_start_cpuid;

#ifdef DEVICE_POLLING
	/* Device is not in polling mode by default */
	ifp->if_poll_cpuid = -1;
	if (ifp->if_poll != NULL)
		ifp->if_start_cpuid = if_start_cpuid_poll;
#endif

	ifp->if_start_nmsg = kmalloc(ncpus * sizeof(struct netmsg),
				     M_LWKTMSG, M_WAITOK);
	for (i = 0; i < ncpus; ++i) {
		netmsg_init(&ifp->if_start_nmsg[i], &netisr_adone_rport, 0,
			    if_start_dispatch);
		ifp->if_start_nmsg[i].nm_lmsg.u.ms_resultp = ifp;
	}

	TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
	ifp->if_index = ++if_index;

	/*
	 * The old code would work if the interface passed a pre-existing
	 * chain of ifaddrs to this code.  We don't trust our callers to
	 * properly initialize the tailq, however, so we no longer allow
	 * this unlikely case.
	 */
	ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
				    M_IFADDR, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpus; ++i)
		TAILQ_INIT(&ifp->if_addrheads[i]);

	TAILQ_INIT(&ifp->if_prefixhead);
	LIST_INIT(&ifp->if_multiaddrs);
	getmicrotime(&ifp->if_lastchange);

	if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
		unsigned int n;
		struct ifnet **q;

		if_indexlim <<= 1;

		/* grow ifindex2ifnet */
		n = if_indexlim * sizeof(*q);
		q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
		if (ifindex2ifnet != NULL) {
			bcopy(ifindex2ifnet, q, n/2);
			kfree(ifindex2ifnet, M_IFADDR);
		}
		ifindex2ifnet = q;
	}

	ifindex2ifnet[if_index] = ifp;

	/*
	 * create a Link Level name for this device
	 */
	namelen = strlen(ifp->if_xname);
#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
	masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
	socksize = masklen + ifp->if_addrlen;
#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
	if (socksize < sizeof(*sdl))
		socksize = sizeof(*sdl);
	socksize = ROUNDUP(socksize);
	ifasize = sizeof(struct ifaddr) + 2 * socksize;
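	/*
	 * Worked example (illustrative): for an Ethernet interface named
	 * "em0", namelen is 3, masklen is the offset of sdl_data within
	 * struct sockaddr_dl plus 3, and socksize adds if_addrlen (6 for
	 * Ethernet) before ROUNDUP() pads it to a multiple of sizeof(long).
	 * ifasize then reserves room for the ifaddr plus two such sockaddrs:
	 * the link-level address and the all-ones name mask built below.
	 */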
	ifa = ifa_create(ifasize, M_WAITOK);
	sdl = (struct sockaddr_dl *)(ifa + 1);
	sdl->sdl_len = socksize;
	sdl->sdl_family = AF_LINK;
	bcopy(ifp->if_xname, sdl->sdl_data, namelen);
	sdl->sdl_nlen = namelen;
	sdl->sdl_index = ifp->if_index;
	sdl->sdl_type = ifp->if_type;
	ifp->if_lladdr = ifa;
	ifa->ifa_rtrequest = link_rtrequest;
	ifa->ifa_addr = (struct sockaddr *)sdl;
	sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
	ifa->ifa_netmask = (struct sockaddr *)sdl;
	sdl->sdl_len = masklen;
	while (namelen != 0)
		sdl->sdl_data[--namelen] = 0xff;
	ifa_iflink(ifa, ifp, 0 /* Insert head */);

	EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
	devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);

	ifq = &ifp->if_snd;
	ifq->altq_disc = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	ifq->altq_tbr = NULL;
	ifq->altq_started = 0;
	ifq->altq_prepended = NULL;
	ifq_set_classic(ifq);

	if (!SLIST_EMPTY(&domains))
		if_attachdomain1(ifp);

	/* Announce the interface. */
	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
}
if_attachdomain(void *dummy)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_list)
		if_attachdomain1(ifp);
}
SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
	if_attachdomain, NULL);

if_attachdomain1(struct ifnet *ifp)
{
	struct domain *dp;

	/* address family dependent data region */
	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifattach)
			ifp->if_afdata[dp->dom_family] =
				(*dp->dom_ifattach)(ifp);
}
/*
 * Purge all addresses whose type is _not_ AF_LINK
 */
if_purgeaddrs_nolink(struct ifnet *ifp)
{
	struct ifaddr_container *ifac, *next;

	TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
			      ifa_link, next) {
		struct ifaddr *ifa = ifac->ifa;

		/* Leave link ifaddr as it is */
		if (ifa->ifa_addr->sa_family == AF_LINK)
			continue;

		/* XXX: Ugly!! ad hoc just for INET */
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in4 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			bzero(&ifr, sizeof ifr);
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
				       NULL) == 0)
				continue;
		}
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in6 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			/* ifp_addrhead is already updated */
			continue;
		}

		ifa_ifunlink(ifa, ifp);
	}
}
/*
 * Detach an interface, removing it from the
 * list of "active" interfaces.
 */
if_detach(struct ifnet *ifp)
{
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct domain *dp;
	int i, cpu, origcpu;

	EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

	/*
	 * Remove routes and flush queues.
	 */
#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (ifq_is_enabled(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ifq_is_attached(&ifp->if_snd))
		altq_detach(&ifp->if_snd);

	/*
	 * Clean up all addresses.
	 */
	ifp->if_lladdr = NULL;

	if_purgeaddrs_nolink(ifp);
	if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
			("non-link ifaddr is left on if_addrheads"));

		ifa_ifunlink(ifa, ifp);

		KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
			("there are still ifaddrs left on if_addrheads"));
	}

	/*
	 * Remove all IPv4 kernel structures related to ifp.
	 */

	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect some inconsistency and complain.
	 */

	/*
	 * Delete all remaining routes using this interface.
	 * Unfortunately the only way to do this is to slog through
	 * the entire routing table looking for routes which point
	 * to this interface...oh well...
	 */
	origcpu = mycpuid;
	for (cpu = 0; cpu < ncpus2; cpu++) {
		lwkt_migratecpu(cpu);
		for (i = 1; i <= AF_MAX; i++) {
			if ((rnh = rt_tables[cpu][i]) == NULL)
				continue;
			rnh->rnh_walktree(rnh, if_rtdel, ifp);
		}
	}
	lwkt_migratecpu(origcpu);

	/* Announce that the interface is gone. */
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
			(*dp->dom_ifdetach)(ifp,
				ifp->if_afdata[dp->dom_family]);

	/*
	 * Remove interface from ifindex2ifp[] and maybe decrement if_index.
	 */
	ifindex2ifnet[ifp->if_index] = NULL;
	while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
		if_index--;

	TAILQ_REMOVE(&ifnet, ifp, if_link);
	kfree(ifp->if_addrheads, M_IFADDR);
	kfree(ifp->if_start_nmsg, M_LWKTMSG);
}
/*
 * Delete Routes for a Network Interface
 *
 * Called for each routing entry via the rnh->rnh_walktree() call above
 * to delete all route entries referencing a detaching network interface.
 *
 * Arguments:
 *	rn	pointer to node in the routing table
 *	arg	argument passed to rnh->rnh_walktree() - detaching interface
 *
 * Returns:
 *	errno	failed - reason indicated
 */
if_rtdel(struct radix_node *rn, void *arg)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct ifnet *ifp = arg;
	int err;

	if (rt->rt_ifp == ifp) {
		/*
		 * Protect (sorta) against walktree recursion problems
		 * with cloned routes
		 */
		if (!(rt->rt_flags & RTF_UP))
			return (0);

		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags,
				NULL);
		if (err)
			log(LOG_WARNING, "if_rtdel: error %d\n", err);
	}

	return (0);
}
/*
 * Locate an interface based on a complete address.
 */
ifa_ifwithaddr(struct sockaddr *addr)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (sa_equal(addr, ifa->ifa_addr))
				return (ifa);
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    /* IPv6 doesn't have broadcast */
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr))
				return (ifa);
		}
	}
	return (NULL);
}
/*
 * Locate the point to point interface with a given destination address.
 */
ifa_ifwithdstaddr(struct sockaddr *addr)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		if (!(ifp->if_flags & IFF_POINTOPOINT))
			continue;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (ifa->ifa_dstaddr &&
			    sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		}
	}
	return (NULL);
}
/*
 * Find an interface on a specific network.  If many, choice
 * is most specific found.
 */
ifa_ifwithnet(struct sockaddr *addr)
{
	struct ifnet *ifp;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;
	char *addr_data = addr->sa_data, *cplim;

	/*
	 * AF_LINK addresses can be looked up directly by their index number,
	 * so do that if we can.
	 */
	if (af == AF_LINK) {
		struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;

		if (sdl->sdl_index && sdl->sdl_index <= if_index)
			return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
	}

	/*
	 * Scan through each interface, looking for ones that have
	 * addresses in this address family.
	 */
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;
			char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr))
					return (ifa);
			} else {
				/*
				 * if we have a special address handler,
				 * then use it instead of the generic one.
				 */
				if (ifa->ifa_claim_addr) {
					if ((*ifa->ifa_claim_addr)(ifa, addr)) {
						return (ifa);
					} else {
						continue;
					}
				}

				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 */
				if (ifa->ifa_netmask == 0)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len +
					(char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one) then remember the new one
				 * before continuing to search
				 * for an even better one.
				 */
				if (ifa_maybe == 0 ||
				    rn_refines((char *)ifa->ifa_netmask,
					       (char *)ifa_maybe->ifa_netmask))
					ifa_maybe = ifa;
			}
		}
	}
	return (ifa_maybe);
}
/*
 * Find an interface address specific to an interface best matching
 * a given address.
 */
ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
{
	struct ifaddr_container *ifac;
	char *cp, *cp2, *cp3;
	char *cplim;
	struct ifaddr *ifa_maybe = 0;
	u_int af = addr->sa_family;

	if (af >= AF_MAX)
		return (0);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr->sa_family != af)
			continue;
		if (ifa_maybe == 0)
			ifa_maybe = ifa;
		if (ifa->ifa_netmask == NULL) {
			if (sa_equal(addr, ifa->ifa_addr) ||
			    (ifa->ifa_dstaddr != NULL &&
			     sa_equal(addr, ifa->ifa_dstaddr)))
				return (ifa);
			continue;
		}
		if (ifp->if_flags & IFF_POINTOPOINT) {
			if (sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		} else {
			cp = addr->sa_data;
			cp2 = ifa->ifa_addr->sa_data;
			cp3 = ifa->ifa_netmask->sa_data;
			cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
			for (; cp3 < cplim; cp3++)
				if ((*cp++ ^ *cp2++) & *cp3)
					break;
			if (cp3 == cplim)
				return (ifa);
		}
	}
	return (ifa_maybe);
}
/*
 * Default action when installing a route with a Link Level gateway.
 * Lookup an appropriate real ifa to point to.
 * This should be moved to /sys/net/link.c eventually.
 */
link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	struct sockaddr *dst;
	struct ifnet *ifp;

	if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
	    (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
		return;
	ifa = ifaof_ifpforaddr(dst, ifp);
	if (ifa != NULL) {
		IFAFREE(rt->rt_ifa);
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
			ifa->ifa_rtrequest(cmd, rt, info);
	}
}
/*
 * Mark an interface down and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
if_unroute(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr_container *ifac;

	ifp->if_flags &= ~flag;
	getmicrotime(&ifp->if_lastchange);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
	}
	ifq_purge(&ifp->if_snd);
}
/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
if_route(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr_container *ifac;

	ifq_purge(&ifp->if_snd);
	ifp->if_flags |= flag;
	getmicrotime(&ifp->if_lastchange);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFUP, ifa->ifa_addr);
	}
}
/*
 * Mark an interface down and notify protocols of the transition.  An
 * interface going down is also considered to be a synchronizing event.
 * We must ensure that all packet processing related to the interface
 * has completed before we return so e.g. the caller can free the ifnet
 * structure that the mbufs may be referencing.
 *
 * NOTE: must be called at splnet or equivalent.
 */
if_down(struct ifnet *ifp)
{
	if_unroute(ifp, IFF_UP, AF_UNSPEC);
	netmsg_service_sync();
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
if_up(struct ifnet *ifp)
{
	if_route(ifp, IFF_UP, AF_UNSPEC);
}
/*
 * Process a link state change.
 * NOTE: must be called at splsoftnet or equivalent.
 */
if_link_state_change(struct ifnet *ifp)
{
	int link_state = ifp->if_link_state;

	devctl_notify("IFNET", ifp->if_xname,
	    (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
}
/*
 * Handle interface watchdog timer routines.  Called
 * from softclock, we decrement timers (if set) and
 * call the appropriate interface routine on expiration.
 */
if_slowtimo(void *arg)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_timer == 0 || --ifp->if_timer)
			continue;
		if (ifp->if_watchdog) {
			if (ifnet_tryserialize_all(ifp)) {
				(*ifp->if_watchdog)(ifp);
				ifnet_deserialize_all(ifp);
			} else {
				/* try again next timeout */
				ifp->if_timer = 1;
			}
		}
	}

	callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
}
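/*
 * Driver-side sketch (hypothetical, not part of this file): a driver arms
 * the watchdog by setting if_timer when it hands packets to the hardware
 * and supplies an if_watchdog routine; each if_slowtimo() tick decrements
 * if_timer and invokes the watchdog when the counter reaches zero.
 *
 *	ifp->if_watchdog = foo_watchdog;	(set once at attach time)
 *	ifp->if_timer = 5;			(re-armed in foo_start())
 */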
/*
 * Map interface name to
 * interface structure pointer.
 */
ifunit(const char *name)
{
	struct ifnet *ifp;

	/*
	 * Search all the interfaces for this name/number
	 */
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
			break;
	}
	return (ifp);
}

/*
 * Map interface name in a sockaddr_dl to
 * interface structure pointer.
 */
if_withname(struct sockaddr *sa)
{
	char ifname[IFNAMSIZ+1];
	struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;

	if ((sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
	    (sdl->sdl_nlen > IFNAMSIZ))
		return (NULL);

	/*
	 * ifunit wants a null-terminated name.  It may not be null-terminated
	 * in the sockaddr.  We don't want to change the caller's sockaddr,
	 * and there might not be room to put the trailing null anyway, so we
	 * make a local copy that we know we can null terminate safely.
	 */
	bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
	ifname[sdl->sdl_nlen] = '\0';
	return ifunit(ifname);
}
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifnet *ifp;
	struct ifreq *ifr;
	struct ifstat *ifs;
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;
	size_t namelen, onamelen;
	char new_name[IFNAMSIZ];
	int error, new_flags, oif_flags;

	switch (cmd) {
	case SIOCGIFCONF:
	case OSIOCGIFCONF:
		return (ifconf(cmd, data, cred));
	}

	ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCIFCREATE:
	case SIOCIFDESTROY:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return ((cmd == SIOCIFCREATE) ?
			if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name)) :
			if_clone_destroy(ifr->ifr_name));

	case SIOCIFGCLONERS:
		return (if_clone_list((struct if_clonereq *)data));
	}
	ifp = ifunit(ifr->ifr_name);
	if (ifp == NULL)
		return (ENXIO);

	switch (cmd) {
	case SIOCGIFINDEX:
		ifr->ifr_index = ifp->if_index;
		break;

	case SIOCGIFFLAGS:
		ifr->ifr_flags = ifp->if_flags;
		ifr->ifr_flagshigh = ifp->if_flags >> 16;
		break;

	case SIOCGIFCAP:
		ifr->ifr_reqcap = ifp->if_capabilities;
		ifr->ifr_curcap = ifp->if_capenable;
		break;

	case SIOCGIFMETRIC:
		ifr->ifr_metric = ifp->if_metric;
		break;

	case SIOCGIFMTU:
		ifr->ifr_mtu = ifp->if_mtu;
		break;

	case SIOCGIFPHYS:
		ifr->ifr_phys = ifp->if_physical;
		break;

	case SIOCGIFPOLLCPU:
#ifdef DEVICE_POLLING
		ifr->ifr_pollcpu = ifp->if_poll_cpuid;
#else
		ifr->ifr_pollcpu = -1;
#endif
		break;

	case SIOCSIFPOLLCPU:
#ifdef DEVICE_POLLING
		if ((ifp->if_flags & IFF_POLLING) == 0)
			ether_pollcpu_register(ifp, ifr->ifr_pollcpu);
#endif
		break;
	case SIOCSIFFLAGS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		new_flags = (ifr->ifr_flags & 0xffff) |
			    (ifr->ifr_flagshigh << 16);
		if (ifp->if_flags & IFF_SMART) {
			/* Smart drivers twiddle their own routes */
		} else if (ifp->if_flags & IFF_UP &&
			   (new_flags & IFF_UP) == 0) {
			if_down(ifp);
		} else if (new_flags & IFF_UP &&
			   (ifp->if_flags & IFF_UP) == 0) {
			if_up(ifp);
		}

#ifdef DEVICE_POLLING
		if ((new_flags ^ ifp->if_flags) & IFF_POLLING) {
			if (new_flags & IFF_POLLING) {
				ether_poll_register(ifp);
			} else {
				ether_poll_deregister(ifp);
			}
		}
#endif

		ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
				(new_flags &~ IFF_CANTCHANGE);
		if (new_flags & IFF_PPROMISC) {
			/* Permanently promiscuous mode requested */
			ifp->if_flags |= IFF_PROMISC;
		} else if (ifp->if_pcount == 0) {
			ifp->if_flags &= ~IFF_PROMISC;
		}
		if (ifp->if_ioctl) {
			ifnet_serialize_all(ifp);
			ifp->if_ioctl(ifp, cmd, data, cred);
			ifnet_deserialize_all(ifp);
		}
		getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFCAP:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifr->ifr_reqcap & ~ifp->if_capabilities)
			return (EINVAL);
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;
	case SIOCSIFNAME:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
		if (error)
			return (error);
		if (new_name[0] == '\0')
			return (EINVAL);
		if (ifunit(new_name) != NULL)
			return (EEXIST);

		EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);

		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		/* XXX IFA_LOCK(ifa); */
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		namelen = strlen(new_name);
		onamelen = sdl->sdl_nlen;
		/*
		 * Move the address if needed.  This is safe because we
		 * allocate space for a name of length IFNAMSIZ when we
		 * create this in if_attach().
		 */
		if (namelen != onamelen) {
			bcopy(sdl->sdl_data + onamelen,
			      sdl->sdl_data + namelen, sdl->sdl_alen);
		}
		bcopy(new_name, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
		bzero(sdl->sdl_data, onamelen);
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;
		/* XXX IFA_UNLOCK(ifa) */

		EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);

		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
		break;
	case SIOCSIFMETRIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		ifp->if_metric = ifr->ifr_metric;
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFMTU:
	{
		u_long oldmtu = ifp->if_mtu;

		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
			return (EINVAL);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		/*
		 * If the link MTU changed, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);

		/* Don't allow group membership on non-multicast interfaces. */
		if ((ifp->if_flags & IFF_MULTICAST) == 0)
			return (EOPNOTSUPP);

		/* Don't let users screw up protocols' entries. */
		if (ifr->ifr_addr.sa_family != AF_LINK)
			return (EINVAL);

		if (cmd == SIOCADDMULTI) {
			struct ifmultiaddr *ifma;

			error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
		} else {
			error = if_delmulti(ifp, &ifr->ifr_addr);
		}
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFPHYADDR:
	case SIOCDIFPHYADDR:
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
#endif
	case SIOCSLIFPHYADDR:
	case SIOCSIFGENERIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifp->if_ioctl == 0)
			return (EOPNOTSUPP);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		ifs->ascii[0] = '\0';
		/* FALLTHROUGH */
	case SIOCGIFPSRCADDR:
	case SIOCGIFPDSTADDR:
	case SIOCGLIFPHYADDR:
	case SIOCGIFGENERIC:
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;

	case SIOCSIFLLADDR:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		return if_setlladdr(ifp,
		    ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
	default:
		oif_flags = ifp->if_flags;
		if (so->so_proto == 0)
			return (EOPNOTSUPP);
#ifndef COMPAT_43
		error = so_pru_control(so, cmd, data, ifp);
#else
	    {
		u_long ocmd = cmd;

		switch (cmd) {
		case SIOCSIFDSTADDR:
		case SIOCSIFADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
#if BYTE_ORDER != BIG_ENDIAN
			if (ifr->ifr_addr.sa_family == 0 &&
			    ifr->ifr_addr.sa_len < 16) {
				ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
				ifr->ifr_addr.sa_len = 16;
			}
#else
			if (ifr->ifr_addr.sa_len == 0)
				ifr->ifr_addr.sa_len = 16;
#endif
			break;
		case OSIOCGIFADDR:
			cmd = SIOCGIFADDR;
			break;
		case OSIOCGIFDSTADDR:
			cmd = SIOCGIFDSTADDR;
			break;
		case OSIOCGIFBRDADDR:
			cmd = SIOCGIFBRDADDR;
			break;
		case OSIOCGIFNETMASK:
			cmd = SIOCGIFNETMASK;
			break;
		}

		error = so_pru_control(so, cmd, data, ifp);

		switch (ocmd) {
		case OSIOCGIFADDR:
		case OSIOCGIFDSTADDR:
		case OSIOCGIFBRDADDR:
		case OSIOCGIFNETMASK:
			*(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
			break;
		}
	    }
#endif /* COMPAT_43 */

		if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
#ifdef INET6
			DELAY(100);	/* XXX: temporary workaround for fxp issue */
			if (ifp->if_flags & IFF_UP)
				in6_if_up(ifp);
#endif
		}
		break;
	}
	return (error);
}
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
ifpromisc(struct ifnet *ifp, int pswitch)
{
	struct ifreq ifr;
	int error;
	int oldflags;

	oldflags = ifp->if_flags;
	if (ifp->if_flags & IFF_PPROMISC) {
		/* Do nothing if device is in permanently promiscuous mode */
		ifp->if_pcount += pswitch ? 1 : -1;
		return (0);
	}
	if (pswitch) {
		/*
		 * If the device is not configured up, we cannot put it in
		 * promiscuous mode.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode enabled\n",
		    ifp->if_xname);
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode disabled\n",
		    ifp->if_xname);
	}
	ifr.ifr_flags = ifp->if_flags;
	ifr.ifr_flagshigh = ifp->if_flags >> 16;
	ifnet_serialize_all(ifp);
	error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
	ifnet_deserialize_all(ifp);
	if (error != 0)
		ifp->if_flags = oldflags;
	return (error);
}
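/*
 * Usage note (illustrative): callers such as bpf(4) bracket a capture
 * session with matched on/off calls; because of the if_pcount reference
 * count above, only the first "on" and the last "off" actually toggle
 * IFF_PROMISC and push SIOCSIFFLAGS down to the driver.
 *
 *	error = ifpromisc(ifp, 1);	(start of a capture session)
 *	...
 *	error = ifpromisc(ifp, 0);	(matched release)
 */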
/*
 * Return interface configuration
 * of system.  List may be used
 * in later ioctl's (above) to get
 * other information.
 */
ifconf(u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifconf *ifc = (struct ifconf *)data;
	struct ifnet *ifp;
	struct sockaddr *sa;
	struct ifreq ifr, *ifrp;
	int space = ifc->ifc_len, error = 0;

	ifrp = ifc->ifc_req;
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;
		int addrs;

		if (space <= sizeof ifr)
			break;

		/*
		 * Zero the stack declared structure first to prevent
		 * memory disclosure.
		 */
		bzero(&ifr, sizeof(ifr));
		if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
		    >= sizeof(ifr.ifr_name)) {
			error = ENAMETOOLONG;
			break;
		}

		addrs = 0;
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (space <= sizeof ifr)
				break;
			sa = ifa->ifa_addr;
			if (cred->cr_prison &&
			    prison_if(cred, sa))
				continue;
			addrs++;
			if (cmd == OSIOCGIFCONF) {
				struct osockaddr *osa =
					(struct osockaddr *)&ifr.ifr_addr;

				ifr.ifr_addr = *sa;
				osa->sa_family = sa->sa_family;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else if (sa->sa_len <= sizeof(*sa)) {
				ifr.ifr_addr = *sa;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else {
				if (space < (sizeof ifr) + sa->sa_len -
					    sizeof(*sa))
					break;
				space -= sa->sa_len - sizeof(*sa);
				error = copyout(&ifr, ifrp,
						sizeof ifr.ifr_name);
				if (error == 0) {
					error = copyout(sa, &ifrp->ifr_addr,
							sa->sa_len);
				}
				ifrp = (struct ifreq *)
					(sa->sa_len + (caddr_t)&ifrp->ifr_addr);
			}
			if (error)
				break;
			space -= sizeof ifr;
		}
		if (error)
			break;
		if (!addrs) {
			bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
			error = copyout(&ifr, ifrp, sizeof ifr);
			if (error)
				break;
			space -= sizeof ifr;
			ifrp++;
		}
	}
	ifc->ifc_len -= space;
	return (error);
}
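/*
 * Userland sketch (illustrative, not part of this file): the list built by
 * ifconf() above is consumed roughly like this; note that records become
 * variable-length whenever sa_len exceeds sizeof(struct sockaddr):
 *
 *	struct ifconf ifc;
 *	char buf[8192];
 *
 *	ifc.ifc_len = sizeof(buf);
 *	ifc.ifc_buf = buf;
 *	if (ioctl(s, SIOCGIFCONF, &ifc) == 0) {
 *		walk ifc.ifc_req, advancing past each entry by the name
 *		field plus max(sizeof(struct sockaddr), sa->sa_len)
 *	}
 */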
/*
 * Just like ifpromisc(), but for all-multicast-reception mode.
 */
if_allmulti(struct ifnet *ifp, int onswitch)
{
	int error = 0;
	struct ifreq ifr;

	if (onswitch) {
		if (ifp->if_amcount++ == 0) {
			ifp->if_flags |= IFF_ALLMULTI;
			ifr.ifr_flags = ifp->if_flags;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
					      NULL);
			ifnet_deserialize_all(ifp);
		}
	} else {
		if (ifp->if_amcount > 1) {
			ifp->if_amcount--;
		} else {
			ifp->if_amcount = 0;
			ifp->if_flags &= ~IFF_ALLMULTI;
			ifr.ifr_flags = ifp->if_flags;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
					      NULL);
			ifnet_deserialize_all(ifp);
		}
	}
	return (error);
}
/*
 * Add a multicast listenership to the interface in question.
 * The link layer provides a routine which converts a network
 * address into a link layer address, should it be required.
 */
if_addmulti(
	struct ifnet *ifp,	/* interface to manipulate */
	struct sockaddr *sa,	/* address to add */
	struct ifmultiaddr **retifma)
{
	struct sockaddr *llsa, *dupsa;
	int error;
	struct ifmultiaddr *ifma;

	/*
	 * If the matching multicast address already exists
	 * then don't add a new one, just add a reference
	 */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (sa_equal(sa, ifma->ifma_addr)) {
			ifma->ifma_refcount++;
			if (retifma)
				*retifma = ifma;
			return 0;
		}
	}

	/*
	 * Give the link layer a chance to accept/reject it, and also
	 * find out which AF_LINK address this maps to, if it isn't one
	 * already.
	 */
	if (ifp->if_resolvemulti) {
		ifnet_serialize_all(ifp);
		error = ifp->if_resolvemulti(ifp, &llsa, sa);
		ifnet_deserialize_all(ifp);
		if (error)
			return error;
	} else {
		llsa = NULL;
	}

	MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, M_IFMADDR, M_WAITOK);
	MALLOC(dupsa, struct sockaddr *, sa->sa_len, M_IFMADDR, M_WAITOK);
	bcopy(sa, dupsa, sa->sa_len);

	ifma->ifma_addr = dupsa;
	ifma->ifma_lladdr = llsa;
	ifma->ifma_ifp = ifp;
	ifma->ifma_refcount = 1;
	ifma->ifma_protospec = 0;
	rt_newmaddrmsg(RTM_NEWMADDR, ifma);

	/*
	 * Some network interfaces can scan the address list at
	 * interrupt time; lock them out.
	 */
	LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
	if (retifma)
		*retifma = ifma;

	if (llsa != NULL) {
		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (sa_equal(ifma->ifma_addr, llsa))
				break;
		}
		if (ifma) {
			ifma->ifma_refcount++;
		} else {
			MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma,
			       M_IFMADDR, M_WAITOK);
			MALLOC(dupsa, struct sockaddr *, llsa->sa_len,
			       M_IFMADDR, M_WAITOK);
			bcopy(llsa, dupsa, llsa->sa_len);
			ifma->ifma_addr = dupsa;
			ifma->ifma_ifp = ifp;
			ifma->ifma_refcount = 1;
			LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
		}
	}

	/*
	 * We are certain we have added something, so call down to the
	 * interface to let them know about it.
	 */
	ifnet_serialize_all(ifp);
	ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
	ifnet_deserialize_all(ifp);

	return 0;
}
/*
 * Remove a reference to a multicast address on this interface.  Yell
 * if the request does not match an existing membership.
 */
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return ENOENT;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	rt_newmaddrmsg(RTM_DELMADDR, ifma);
	sa = ifma->ifma_lladdr;
	LIST_REMOVE(ifma, ifma_link);
	/*
	 * Make sure the interface driver is notified
	 * in the case of a link layer mcast group being left.
	 */
	if (ifma->ifma_addr->sa_family == AF_LINK && sa == 0) {
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
		ifnet_deserialize_all(ifp);
	}
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(ifma, M_IFMADDR);
	if (sa == NULL)
		return 0;

	/*
	 * Now look for the link-layer address which corresponds to
	 * this network address.  It had been squirreled away in
	 * ifma->ifma_lladdr for this purpose (so we don't have
	 * to call ifp->if_resolvemulti() again), and we saved that
	 * value in sa above.  If some nasty deleted the
	 * link-layer address out from underneath us, we can deal because
	 * the address we stored is not the same as the one which was
	 * in the record for the link-layer address.  (So we don't complain
	 * in that case.)
	 */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return 0;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	ifnet_serialize_all(ifp);
	LIST_REMOVE(ifma, ifma_link);
	ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	ifnet_deserialize_all(ifp);
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(sa, M_IFMADDR);
	kfree(ifma, M_IFMADDR);
	return 0;
}
/*
 * Set the link layer address on an interface.
 *
 * At this time we only support certain types of interfaces,
 * and we don't allow the length of the address to change.
 */
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
	struct sockaddr_dl *sdl;
	struct ifreq ifr;

	sdl = IF_LLSOCKADDR(ifp);
	if (sdl == NULL)
		return (EINVAL);
	if (len != sdl->sdl_alen)	/* don't allow length to change */
		return (EINVAL);
	switch (ifp->if_type) {
	case IFT_ETHER:			/* these types use struct arpcom */
		bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
		bcopy(lladdr, LLADDR(sdl), len);
		break;
	default:
		return (ENODEV);
	}
	/*
	 * If the interface is already up, we need
	 * to re-init it in order to reprogram its
	 * address filter.
	 */
	ifnet_serialize_all(ifp);
	if ((ifp->if_flags & IFF_UP) != 0) {
		struct ifaddr_container *ifac;

		ifp->if_flags &= ~IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
			      NULL);
		ifp->if_flags |= IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
			      NULL);

		/*
		 * Also send gratuitous ARPs to notify other nodes about
		 * the address change.
		 */
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr != NULL &&
			    ifa->ifa_addr->sa_family == AF_INET)
				arp_ifinit(ifp, ifa);
		}
	}
	ifnet_deserialize_all(ifp);
	return (0);
}
struct ifmultiaddr *
ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
{
	struct ifmultiaddr *ifma;

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(ifma->ifma_addr, sa))
			break;
	return (ifma);
}
/*
 * This function locates the first real ethernet MAC from a network
 * card and loads it into node, returning 0 on success or ENOENT if
 * no suitable interfaces were found.  It is used by the uuid code to
 * generate a unique 6-byte number.
 */
if_getanyethermac(uint16_t *node, int minlen)
{
	struct ifnet *ifp;
	struct sockaddr_dl *sdl;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_type != IFT_ETHER)
			continue;
		sdl = IF_LLSOCKADDR(ifp);
		if (sdl->sdl_alen < minlen)
			continue;
		bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
		      minlen);
		return(0);
	}
	return (ENOENT);
}
/*
 * The name argument must be a pointer to storage which will last as
 * long as the interface does.  For physical devices, the result of
 * device_get_name(dev) is a good choice and for pseudo-devices a
 * static string works well.
 */
if_initname(struct ifnet *ifp, const char *name, int unit)
{
	ifp->if_dname = name;
	ifp->if_dunit = unit;
	if (unit != IF_DUNIT_NONE)
		ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
	else
		strlcpy(ifp->if_xname, name, IFNAMSIZ);
}
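/*
 * Typical call site (illustrative): physical drivers pass the newbus name
 * and unit, e.g.
 *
 *	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 *
 * Passing IF_DUNIT_NONE as the unit leaves if_xname as the bare name with
 * no unit number appended, as the code above shows.
 */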
if_printf(struct ifnet *ifp, const char *fmt, ...)
{
	__va_list ap;
	int retval;

	retval = kprintf("%s: ", ifp->if_xname);
	__va_start(ap, fmt);
	retval += kvprintf(fmt, ap);
	__va_end(ap);
	return (retval);
}

ifq_set_classic(struct ifaltq *ifq)
{
	ifq->altq_enqueue = ifq_classic_enqueue;
	ifq->altq_dequeue = ifq_classic_dequeue;
	ifq->altq_request = ifq_classic_request;
}
ifq_classic_enqueue(struct ifaltq *ifq, struct mbuf *m,
		    struct altq_pktattr *pa __unused)
{
	logifq(enqueue, ifq);
	if (IF_QFULL(ifq)) {
		m_freem(m);
		return (ENOBUFS);
	} else {
		IF_ENQUEUE(ifq, m);
		return (0);
	}
}

ifq_classic_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
{
	struct mbuf *m;

	switch (op) {
	case ALTDQ_POLL:
		IF_POLL(ifq, m);
		break;
	case ALTDQ_REMOVE:
		logifq(dequeue, ifq);
		IF_DEQUEUE(ifq, m);
		break;
	default:
		panic("unsupported ALTQ dequeue op: %d", op);
	}
	KKASSERT(mpolled == NULL || mpolled == m);
	return (m);
}

ifq_classic_request(struct ifaltq *ifq, int req, void *arg)
{
	switch (req) {
	case ALTRQ_PURGE:
		IF_DRAIN(ifq);
		break;
	default:
		panic("unsupported ALTQ request: %d", req);
	}
	return (0);
}
ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
{
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0, error, start = 0;

	ASSERT_IFNET_NOT_SERIALIZED_TX(ifp);

	error = ifq_enqueue_locked(ifq, m, pa);
	if (error)
		return error;
	if (!ifq->altq_started) {
		/*
		 * Hold the interlock of ifnet.if_start
		 */
		ifq->altq_started = 1;
		start = 1;
	}

	ifp->if_obytes += m->m_pkthdr.len;
	if (m->m_flags & M_MCAST)
		ifp->if_omcasts++;

	if (!start) {
		logifstart(avoid, ifp);
		return error;
	}

	if (ifq_dispatch_schedonly) {
		/*
		 * Always schedule ifnet.if_start on ifnet's CPU,
		 * short circuiting the rest of this function.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
		return error;
	}

	/*
	 * Try to do direct ifnet.if_start first; if there is
	 * contention on ifnet's serializer, ifnet.if_start will
	 * be scheduled on ifnet's CPU.
	 */
	if (!ifnet_tryserialize_tx(ifp)) {
		/*
		 * ifnet serializer contention happened;
		 * ifnet.if_start is scheduled on ifnet's
		 * CPU, and we keep going.
		 */
		logifstart(contend_sched, ifp);
		if_start_schedule(ifp);
		return error;
	}

	if ((ifp->if_flags & IFF_OACTIVE) == 0) {
		logifstart(run, ifp);
		ifp->if_start(ifp);
		if ((ifp->if_flags &
		     (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
			running = 1;
	}
	ifnet_deserialize_tx(ifp);

	if (ifq_dispatch_schednochk || if_start_need_schedule(ifq, running)) {
		/*
		 * More data needs to be transmitted; ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
	}

	return error;
}
ifa_create(int size, int flags)
{
	struct ifaddr *ifa;
	int i;

	KASSERT(size >= sizeof(*ifa), ("ifaddr size too small\n"));

	ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
	if (ifa == NULL)
		return NULL;

	ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
				      M_IFADDR, M_WAITOK | M_ZERO);
	ifa->ifa_ncnt = ncpus;
	for (i = 0; i < ncpus; ++i) {
		struct ifaddr_container *ifac = &ifa->ifa_containers[i];

		ifac->ifa_magic = IFA_CONTAINER_MAGIC;
		ifac->ifa = ifa;
		ifac->ifa_refcnt = 1;
	}
#ifdef IFADDR_DEBUG
	kprintf("alloc ifa %p %d\n", ifa, size);
#endif
	return ifa;
}

ifac_free(struct ifaddr_container *ifac, int cpu_id)
{
	struct ifaddr *ifa = ifac->ifa;

	KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
	KKASSERT(ifac->ifa_refcnt == 0);
	KASSERT(ifac->ifa_listmask == 0,
		("ifa is still on %#x lists\n", ifac->ifa_listmask));

	ifac->ifa_magic = IFA_CONTAINER_DEAD;

#ifdef IFADDR_DEBUG_VERBOSE
	kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
#endif

	KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
		("invalid # of ifac, %d\n", ifa->ifa_ncnt));
	if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
#ifdef IFADDR_DEBUG
		kprintf("free ifa %p\n", ifa);
#endif
		kfree(ifa->ifa_containers, M_IFADDR);
		kfree(ifa, M_IFADDR);
	}
}
ifa_iflink_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
	struct ifaddr *ifa = msg->ifa;
	struct ifnet *ifp = msg->ifp;
	int cpu = mycpuid;
	struct ifaddr_container *ifac;

	ifac = &ifa->ifa_containers[cpu];
	ASSERT_IFAC_VALID(ifac);
	KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
		("ifaddr is on if_addrheads\n"));

	ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
	if (msg->tail)
		TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
	else
		TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);

	ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
}

ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.netmsg, &curthread->td_msgport, 0,
		    ifa_iflink_dispatch);
	msg.ifa = ifa;
	msg.ifp = ifp;
	msg.tail = tail;

	ifa_domsg(&msg.netmsg.nm_lmsg, 0);
}
*nmsg
)
2326 struct netmsg_ifaddr
*msg
= (struct netmsg_ifaddr
*)nmsg
;
2327 struct ifaddr
*ifa
= msg
->ifa
;
2328 struct ifnet
*ifp
= msg
->ifp
;
2330 struct ifaddr_container
*ifac
;
2334 ifac
= &ifa
->ifa_containers
[cpu
];
2335 ASSERT_IFAC_VALID(ifac
);
2336 KASSERT(ifac
->ifa_listmask
& IFA_LIST_IFADDRHEAD
,
2337 ("ifaddr is not on if_addrhead\n"));
2339 TAILQ_REMOVE(&ifp
->if_addrheads
[cpu
], ifac
, ifa_link
);
2340 ifac
->ifa_listmask
&= ~IFA_LIST_IFADDRHEAD
;
2344 ifa_forwardmsg(&nmsg
->nm_lmsg
, cpu
+ 1);
2348 ifa_ifunlink(struct ifaddr
*ifa
, struct ifnet
*ifp
)
2350 struct netmsg_ifaddr msg
;
2352 netmsg_init(&msg
.netmsg
, &curthread
->td_msgport
, 0,
2353 ifa_ifunlink_dispatch
);
2357 ifa_domsg(&msg
.netmsg
.nm_lmsg
, 0);
ifa_destroy_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;

	ifac_free(&msg->ifa->ifa_containers[mycpuid], mycpuid);

	ifa_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

ifa_destroy(struct ifaddr *ifa)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.netmsg, &curthread->td_msgport, 0,
		    ifa_destroy_dispatch);
	msg.ifa = ifa;

	ifa_domsg(&msg.netmsg.nm_lmsg, 0);
}
ifnet_portfn(int cpu)
{
	return &ifnet_threads[cpu].td_msgport;
}

ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
{
	KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);

	if (next_cpu < ncpus)
		lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
	else
		lwkt_replymsg(lmsg, 0);
}

ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
{
	KKASSERT(cpu < ncpus);
	return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
}

ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
{
	KKASSERT(cpu < ncpus);
	lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
}
ifnetinit(void *dummy __unused)
{
	int i;

	for (i = 0; i < ncpus; ++i) {
		struct thread *thr = &ifnet_threads[i];

		lwkt_create(netmsg_service_loop, &ifnet_mpsafe_thread, NULL,
			    thr, TDF_NETWORK | TDF_MPSAFE, i, "ifnet %d", i);
		netmsg_service_port_init(&thr->td_msgport);
	}
}