/*
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
 * $DragonFly: src/sys/net/if.c,v 1.84 2008/11/15 11:58:16 sephe Exp $
 */
#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_polling.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>

#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <net/radix.h>
#include <net/route.h>
#include <net/if_clone.h>
#include <net/netisr.h>
#include <net/netmsg2.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>
#include <machine/smp.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#endif

#if defined(COMPAT_43)
#include <emulation/43bsd/43bsd_socket.h>
#endif /* COMPAT_43 */
struct netmsg_ifaddr {
	struct netmsg	netmsg;
	struct ifnet	*ifp;
	struct ifaddr	*ifa;
	int		tail;
};

/*
 * System initialization
 */
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t, struct ucred *);
static void	ifinit(void *);
static void	ifnetinit(void *);
static void	if_slowtimo(void *);
static void	link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
static int	if_rtdel(struct radix_node *, void *);

/*
 * XXX: declare here to avoid including many inet6 related files..
 * should be more generalized?
 */
extern void	nd6_setmtu(struct ifnet *);
SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
/* Must be after netisr_init */
SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)

MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");

int			ifqmaxlen = IFQ_MAXLEN;
struct ifnethead	ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
/* In ifq_dispatch(), try to do direct ifnet.if_start first */
static int ifq_dispatch_schedonly = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schedonly, CTLFLAG_RW,
	   &ifq_dispatch_schedonly, 0, "");

/* In ifq_dispatch(), schedule ifnet.if_start without checking ifnet.if_snd */
static int ifq_dispatch_schednochk = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schednochk, CTLFLAG_RW,
	   &ifq_dispatch_schednochk, 0, "");

/* In if_devstart(), try to do direct ifnet.if_start first */
static int if_devstart_schedonly = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schedonly, CTLFLAG_RW,
	   &if_devstart_schedonly, 0, "");

/* In if_devstart(), schedule ifnet.if_start without checking ifnet.if_snd */
static int if_devstart_schednochk = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schednochk, CTLFLAG_RW,
	   &if_devstart_schednochk, 0, "");

/* Schedule ifnet.if_start on the current CPU */
static int if_start_oncpu_sched = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_start_oncpu_sched, CTLFLAG_RW,
	   &if_start_oncpu_sched, 0, "");
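
/*
 * Example (illustrative, not part of the original file): the knobs above
 * are plain sysctl integers under net.link.generic and can be flipped at
 * runtime, e.g.
 *
 *	sysctl net.link.generic.ifq_dispatch_schedonly=1
 *
 * to force ifq_dispatch() to always schedule ifnet.if_start on the
 * interface's CPU instead of attempting a direct start first.
 */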
struct callout		if_slowtimo_timer;

struct ifnet		**ifindex2ifnet = NULL;
static struct thread	ifnet_threads[MAXCPU];
static int		ifnet_mpsafe_thread = NETMSG_SERVICE_MPSAFE;
#define IFQ_KTR_STRING		"ifq=%p"
#define IFQ_KTR_ARG_SIZE	(sizeof(void *))
#ifndef KTR_IFQ
#define KTR_IFQ			KTR_ALL
#endif
KTR_INFO_MASTER(ifq);
KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
#define logifq(name, arg)	KTR_LOG(ifq_ ## name, arg)

#define IF_START_KTR_STRING	"ifp=%p"
#define IF_START_KTR_ARG_SIZE	(sizeof(void *))
#ifndef KTR_IF_START
#define KTR_IF_START		KTR_ALL
#endif
KTR_INFO_MASTER(if_start);
KTR_INFO(KTR_IF_START, if_start, run, 0,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, sched, 1,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, avoid, 2,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
#define logifstart(name, arg)	KTR_LOG(if_start_ ## name, arg)
/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */
static void
ifinit(void *dummy)
{
	struct ifnet *ifp;

	callout_init(&if_slowtimo_timer);

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_snd.ifq_maxlen == 0) {
			if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
			ifp->if_snd.ifq_maxlen = ifqmaxlen;
		}
	}
}
static int
if_start_cpuid(struct ifnet *ifp)
{
	return ifp->if_cpuid;
}

#ifdef DEVICE_POLLING
static int
if_start_cpuid_poll(struct ifnet *ifp)
{
	int poll_cpuid = ifp->if_poll_cpuid;

	if (poll_cpuid >= 0)
		return poll_cpuid;
	else
		return ifp->if_cpuid;
}
#endif
static void
if_start_ipifunc(void *arg)
{
	struct ifnet *ifp = arg;
	struct lwkt_msg *lmsg = &ifp->if_start_nmsg[mycpuid].nm_lmsg;

	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
}
/*
 * Schedule ifnet.if_start on ifnet's CPU
 */
static void
if_start_schedule(struct ifnet *ifp)
{
	int cpu;

	if (if_start_oncpu_sched)
		cpu = mycpuid;
	else
		cpu = ifp->if_start_cpuid(ifp);

	if (cpu != mycpuid)
		lwkt_send_ipiq(globaldata_find(cpu), if_start_ipifunc, ifp);
	else
		if_start_ipifunc(ifp);
}
/*
 * NOTE:
 * This function will release ifnet.if_start interlock,
 * if ifnet.if_start does not need to be scheduled
 */
static int
if_start_need_schedule(struct ifaltq *ifq, int running)
{
	if (!running || ifq_is_empty(ifq)
	    || ifq->altq_tbr != NULL) {
		/*
		 * ifnet.if_start interlock is released, if:
		 * 1) Hardware can not take any packets, due to
		 *    o  interface is marked down
		 *    o  hardware queue is full (IFF_OACTIVE)
		 *    Under the second situation, hardware interrupt
		 *    or polling(4) will call/schedule ifnet.if_start
		 *    when hardware queue is ready
		 * 2) There is no packet in the ifnet.if_snd.
		 *    Further ifq_dispatch or ifq_handoff will call/
		 *    schedule ifnet.if_start
		 * 3) TBR is used and it does not allow further
		 *    dequeueing.
		 *    TBR callout will call ifnet.if_start
		 */
		if (!running || !ifq_data_ready(ifq)) {
			ifq->altq_started = 0;
			return 0;
		}
	}
	return 1;
}
static void
if_start_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct ifnet *ifp = lmsg->u.ms_resultp;
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0;

	lwkt_replymsg(lmsg, 0);	/* reply ASAP */

	if (!if_start_oncpu_sched && mycpuid != ifp->if_start_cpuid(ifp)) {
		/*
		 * If the ifnet is still up, we need to
		 * chase its CPU change.
		 */
		if (ifp->if_flags & IFF_UP) {
			logifstart(chase_sched, ifp);
			if_start_schedule(ifp);
		}
		return;
	}

	if (ifp->if_flags & IFF_UP) {
		ifnet_serialize_tx(ifp);	/* XXX try? */
		if ((ifp->if_flags & IFF_OACTIVE) == 0) {
			logifstart(run, ifp);
			ifp->if_start(ifp);
			if ((ifp->if_flags &
			     (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
				running = 1;
		}
		ifnet_deserialize_tx(ifp);
	}

	if (if_start_need_schedule(ifq, running)) {
		if (lmsg->ms_flags & MSGF_DONE) {	/* XXX necessary? */
			logifstart(sched, ifp);
			lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
		}
	}
}
/* Device driver ifnet.if_start helper function */
void
if_devstart(struct ifnet *ifp)
{
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0;

	ASSERT_IFNET_SERIALIZED_TX(ifp);

	if (ifq->altq_started || !ifq_data_ready(ifq)) {
		logifstart(avoid, ifp);
		return;
	}
	ifq->altq_started = 1;

	if (if_devstart_schedonly) {
		/*
		 * Always schedule ifnet.if_start on ifnet's CPU,
		 * short circuit the rest of this function.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
		return;
	}

	logifstart(run, ifp);
	ifp->if_start(ifp);

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
		running = 1;

	if (if_devstart_schednochk || if_start_need_schedule(ifq, running)) {
		/*
		 * More data needs to be transmitted, ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
	}
}
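
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * driver's if_start method runs with the TX serializer held and pulls
 * packets off if_snd, while its transmit-completion path calls
 * if_devstart() once descriptors free up, e.g.
 *
 *	static void
 *	foo_txeof(struct foo_softc *sc)		// hypothetical driver
 *	{
 *		struct ifnet *ifp = &sc->arpcom.ac_if;
 *
 *		// ... reclaim finished TX descriptors ...
 *		ifp->if_flags &= ~IFF_OACTIVE;
 *		if (!ifq_is_empty(&ifp->if_snd))
 *			if_devstart(ifp);
 *	}
 */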
static void
if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_enter(ifp->if_serializer);
}

static void
if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	return lwkt_serialize_try(ifp->if_serializer);
}

#ifdef INVARIANTS
static void
if_default_serialize_assert(struct ifnet *ifp,
			    enum ifnet_serialize slz __unused,
			    boolean_t serialized)
{
	if (serialized)
		ASSERT_SERIALIZED(ifp->if_serializer);
	else
		ASSERT_NOT_SERIALIZED(ifp->if_serializer);
}
#endif
/*
 * Attach an interface to the list of "active" interfaces.
 *
 * The serializer is optional.  If non-NULL, access to the interface
 * is serialized through it.
 */
void
if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
{
	unsigned socksize, ifasize;
	int namelen, masklen;
	struct sockaddr_dl *sdl;
	struct ifaddr *ifa;
	struct ifaltq *ifq;
	int i;
	static int if_indexlim = 8;

	if (ifp->if_serialize != NULL) {
		KASSERT(ifp->if_deserialize != NULL &&
			ifp->if_tryserialize != NULL &&
			ifp->if_serialize_assert != NULL,
			("serialize functions are partially setup\n"));

		/*
		 * If the device supplies serialize functions,
		 * then clear if_serializer to catch any invalid
		 * usage of this field.
		 */
		KASSERT(serializer == NULL,
			("both serialize functions and default serializer "
			 "are set\n"));
		ifp->if_serializer = NULL;
	} else {
		KASSERT(ifp->if_deserialize == NULL &&
			ifp->if_tryserialize == NULL &&
			ifp->if_serialize_assert == NULL,
			("serialize functions are partially setup\n"));
		ifp->if_serialize = if_default_serialize;
		ifp->if_deserialize = if_default_deserialize;
		ifp->if_tryserialize = if_default_tryserialize;
#ifdef INVARIANTS
		ifp->if_serialize_assert = if_default_serialize_assert;
#endif

		/*
		 * The serializer can be passed in from the device,
		 * allowing the same serializer to be used for both
		 * the interrupt interlock and the device queue.
		 * If not specified, the netif structure will use an
		 * embedded serializer.
		 */
		if (serializer == NULL) {
			serializer = &ifp->if_default_serializer;
			lwkt_serialize_init(serializer);
		}
		ifp->if_serializer = serializer;
	}

	ifp->if_start_cpuid = if_start_cpuid;
#ifdef DEVICE_POLLING
	/* Device is not in polling mode by default */
	ifp->if_poll_cpuid = -1;
	if (ifp->if_poll != NULL)
		ifp->if_start_cpuid = if_start_cpuid_poll;
#endif

	ifp->if_start_nmsg = kmalloc(ncpus * sizeof(struct netmsg),
				     M_LWKTMSG, M_WAITOK);
	for (i = 0; i < ncpus; ++i) {
		netmsg_init(&ifp->if_start_nmsg[i], &netisr_adone_rport, 0,
			    if_start_dispatch);
		ifp->if_start_nmsg[i].nm_lmsg.u.ms_resultp = ifp;
	}

	TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
	ifp->if_index = ++if_index;
	/*
	 * The old code would work if the interface passed a pre-existing
	 * chain of ifaddrs to this code.  We don't trust our callers to
	 * properly initialize the tailq, however, so we no longer allow
	 * this unlikely case.
	 */
	ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
				    M_IFADDR, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpus; ++i)
		TAILQ_INIT(&ifp->if_addrheads[i]);

	TAILQ_INIT(&ifp->if_prefixhead);
	LIST_INIT(&ifp->if_multiaddrs);
	getmicrotime(&ifp->if_lastchange);
	if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
		unsigned int n;
		struct ifnet **q;

		if_indexlim <<= 1;

		/* grow ifindex2ifnet */
		n = if_indexlim * sizeof(*q);
		q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
		if (ifindex2ifnet != NULL) {
			bcopy(ifindex2ifnet, q, n/2);
			kfree(ifindex2ifnet, M_IFADDR);
		}
		ifindex2ifnet = q;
	}

	ifindex2ifnet[if_index] = ifp;
	/*
	 * create a Link Level name for this device
	 */
	namelen = strlen(ifp->if_xname);
#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
	masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
	socksize = masklen + ifp->if_addrlen;
#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
	if (socksize < sizeof(*sdl))
		socksize = sizeof(*sdl);
	socksize = ROUNDUP(socksize);
	ifasize = sizeof(struct ifaddr) + 2 * socksize;
	ifa = ifa_create(ifasize, M_WAITOK);
	sdl = (struct sockaddr_dl *)(ifa + 1);
	sdl->sdl_len = socksize;
	sdl->sdl_family = AF_LINK;
	bcopy(ifp->if_xname, sdl->sdl_data, namelen);
	sdl->sdl_nlen = namelen;
	sdl->sdl_index = ifp->if_index;
	sdl->sdl_type = ifp->if_type;
	ifp->if_lladdr = ifa;
	ifa->ifa_rtrequest = link_rtrequest;
	ifa->ifa_addr = (struct sockaddr *)sdl;
	sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
	ifa->ifa_netmask = (struct sockaddr *)sdl;
	sdl->sdl_len = masklen;
	while (namelen != 0)
		sdl->sdl_data[--namelen] = 0xff;
	ifa_iflink(ifa, ifp, 0 /* Insert head */);

	EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
	devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);

	ifq = &ifp->if_snd;
	ifq->altq_disc = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	ifq->altq_tbr = NULL;
	ifq->altq_started = 0;
	ifq->altq_prepended = NULL;
	ifq_set_classic(ifq);

	if (!SLIST_EMPTY(&domains))
		if_attachdomain1(ifp);

	/* Announce the interface. */
	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
}
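
/*
 * Attach sketch (illustrative only, names marked as hypothetical are not
 * from this file): a typical Ethernet driver fills in the ifnet and then
 * goes through the generic attach code, roughly
 *
 *	struct ifnet *ifp = &sc->arpcom.ac_if;	// hypothetical softc
 *
 *	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 *	ifp->if_softc = sc;
 *	ifp->if_ioctl = foo_ioctl;		// hypothetical methods
 *	ifp->if_start = foo_start;
 *	ifp->if_snd.ifq_maxlen = FOO_TX_RING_SIZE - 1;
 *
 * Ethernet drivers normally call ether_ifattach(), which ends up in
 * if_attach(); passing a NULL serializer makes the embedded
 * if_default_serializer protect the interface.
 */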
static void
if_attachdomain(void *dummy)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_list)
		if_attachdomain1(ifp);
}
SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
	if_attachdomain, NULL);

static void
if_attachdomain1(struct ifnet *ifp)
{
	struct domain *dp;

	/* address family dependent data region */
	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifattach)
			ifp->if_afdata[dp->dom_family] =
				(*dp->dom_ifattach)(ifp);
}
/*
 * Purge all addresses whose type is _not_ AF_LINK
 */
void
if_purgeaddrs_nolink(struct ifnet *ifp)
{
	struct ifaddr_container *ifac, *next;

	TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
			      ifa_link, next) {
		struct ifaddr *ifa = ifac->ifa;

		/* Leave link ifaddr as it is */
		if (ifa->ifa_addr->sa_family == AF_LINK)
			continue;
#ifdef INET
		/* XXX: Ugly!! ad hoc just for INET */
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in4 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			bzero(&ifr, sizeof ifr);
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
				       NULL) == 0)
				continue;
		}
#endif /* INET */
#ifdef INET6
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in6 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			in6_purgeaddr(ifa);
			/* ifp_addrhead is already updated */
			continue;
		}
#endif /* INET6 */
		ifa_ifunlink(ifa, ifp);
	}
}
/*
 * Detach an interface, removing it from the
 * list of "active" interfaces.
 */
void
if_detach(struct ifnet *ifp)
{
	struct radix_node_head *rnh;
	struct domain *dp;
	int i, cpu, origcpu;

	EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

	/*
	 * Remove routes and flush queues.
	 */
#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		ether_poll_deregister(ifp);
#endif
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		ifpoll_deregister(ifp);
#endif

	if (ifq_is_enabled(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ifq_is_attached(&ifp->if_snd))
		altq_detach(&ifp->if_snd);

	/*
	 * Clean up all addresses.
	 */
	ifp->if_lladdr = NULL;

	if_purgeaddrs_nolink(ifp);
	if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
		struct ifaddr *ifa;

		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
			("non-link ifaddr is left on if_addrheads"));

		ifa_ifunlink(ifa, ifp);
		KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
			("there are still ifaddrs left on if_addrheads"));
	}

	/*
	 * Remove all IPv4 kernel structures related to ifp.
	 */

	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect some inconsistency and complain.
	 */

	/*
	 * Delete all remaining routes using this interface.
	 * Unfortunately the only way to do this is to slog through
	 * the entire routing table looking for routes which point
	 * to this interface...oh well...
	 */
	origcpu = mycpuid;
	for (cpu = 0; cpu < ncpus2; cpu++) {
		lwkt_migratecpu(cpu);
		for (i = 1; i <= AF_MAX; i++) {
			if ((rnh = rt_tables[cpu][i]) == NULL)
				continue;
			rnh->rnh_walktree(rnh, if_rtdel, ifp);
		}
	}
	lwkt_migratecpu(origcpu);

	/* Announce that the interface is gone. */
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
			(*dp->dom_ifdetach)(ifp,
					    ifp->if_afdata[dp->dom_family]);

	/*
	 * Remove interface from ifindex2ifp[] and maybe decrement if_index.
	 */
	ifindex2ifnet[ifp->if_index] = NULL;
	while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
		if_index--;

	TAILQ_REMOVE(&ifnet, ifp, if_link);
	kfree(ifp->if_addrheads, M_IFADDR);
	kfree(ifp->if_start_nmsg, M_LWKTMSG);
}
/*
 * Delete Routes for a Network Interface
 *
 * Called for each routing entry via the rnh->rnh_walktree() call above
 * to delete all route entries referencing a detaching network interface.
 *
 * Arguments:
 *	rn	pointer to node in the routing table
 *	arg	argument passed to rnh->rnh_walktree() - detaching interface
 *
 * Returns:
 *	0	successful
 *	errno	failed - reason indicated
 */
static int
if_rtdel(struct radix_node *rn, void *arg)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct ifnet *ifp = arg;
	int err;

	if (rt->rt_ifp == ifp) {
		/*
		 * Protect (sorta) against walktree recursion problems
		 */
		if (!(rt->rt_flags & RTF_UP))
			return (0);

		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags,
				NULL);
		if (err)
			log(LOG_WARNING, "if_rtdel: error %d\n", err);
	}

	return (0);
}
/*
 * Locate an interface based on a complete address.
 */
struct ifaddr *
ifa_ifwithaddr(struct sockaddr *addr)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (sa_equal(addr, ifa->ifa_addr))
				return (ifa);
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    /* IPv6 doesn't have broadcast */
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr))
				return (ifa);
		}
	}
	return (NULL);
}
/*
 * Locate the point to point interface with a given destination address.
 */
struct ifaddr *
ifa_ifwithdstaddr(struct sockaddr *addr)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		if (!(ifp->if_flags & IFF_POINTOPOINT))
			continue;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (ifa->ifa_dstaddr &&
			    sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		}
	}
	return (NULL);
}
/*
 * Find an interface on a specific network.  If many match,
 * the most specific one found is chosen.
 */
struct ifaddr *
ifa_ifwithnet(struct sockaddr *addr)
{
	struct ifnet *ifp;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;
	char *addr_data = addr->sa_data, *cplim;

	/*
	 * AF_LINK addresses can be looked up directly by their index number,
	 * so do that if we can.
	 */
	if (af == AF_LINK) {
		struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;

		if (sdl->sdl_index && sdl->sdl_index <= if_index)
			return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
	}

	/*
	 * Scan through each interface, looking for ones that have
	 * addresses in this address family.
	 */
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;
			char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr))
					return (ifa);
			} else {
				/*
				 * if we have a special address handler,
				 * then use it instead of the generic one.
				 */
				if (ifa->ifa_claim_addr) {
					if ((*ifa->ifa_claim_addr)(ifa, addr))
						return (ifa);
					else
						continue;
				}

				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 */
				if (ifa->ifa_netmask == 0)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len +
					(char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one) then remember the new one
				 * before continuing to search
				 * for an even better one.
				 */
				if (ifa_maybe == 0 ||
				    rn_refines((char *)ifa->ifa_netmask,
					       (char *)ifa_maybe->ifa_netmask))
					ifa_maybe = ifa;
			}
		}
	}
	return (ifa_maybe);
}
/*
 * Find an interface address specific to an interface best matching
 * a given address.
 */
struct ifaddr *
ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
{
	struct ifaddr_container *ifac;
	char *cp, *cp2, *cp3;
	char *cplim;
	struct ifaddr *ifa_maybe = 0;
	u_int af = addr->sa_family;

	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr->sa_family != af)
			continue;
		if (ifa_maybe == 0)
			ifa_maybe = ifa;
		if (ifa->ifa_netmask == NULL) {
			if (sa_equal(addr, ifa->ifa_addr) ||
			    (ifa->ifa_dstaddr != NULL &&
			     sa_equal(addr, ifa->ifa_dstaddr)))
				return (ifa);
			continue;
		}
		if (ifp->if_flags & IFF_POINTOPOINT) {
			if (sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		} else {
			cp = addr->sa_data;
			cp2 = ifa->ifa_addr->sa_data;
			cp3 = ifa->ifa_netmask->sa_data;
			cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
			for (; cp3 < cplim; cp3++)
				if ((*cp++ ^ *cp2++) & *cp3)
					break;
			if (cp3 == cplim)
				return (ifa);
		}
	}
	return (ifa_maybe);
}
/*
 * Default action when installing a route with a Link Level gateway.
 * Lookup an appropriate real ifa to point to.
 * This should be moved to /sys/net/link.c eventually.
 */
static void
link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	struct sockaddr *dst;
	struct ifnet *ifp;

	if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
	    (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
		return;
	ifa = ifaof_ifpforaddr(dst, ifp);
	if (ifa != NULL) {
		IFAFREE(rt->rt_ifa);
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
			ifa->ifa_rtrequest(cmd, rt, info);
	}
}
/*
 * Mark an interface down and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_unroute(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr_container *ifac;

	ifp->if_flags &= ~flag;
	getmicrotime(&ifp->if_lastchange);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
	}
	ifq_purge(&ifp->if_snd);
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_route(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr_container *ifac;

	ifq_purge(&ifp->if_snd);
	ifp->if_flags |= flag;
	getmicrotime(&ifp->if_lastchange);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFUP, ifa->ifa_addr);
	}
}
/*
 * Mark an interface down and notify protocols of the transition.  An
 * interface going down is also considered to be a synchronizing event.
 * We must ensure that all packet processing related to the interface
 * has completed before we return so e.g. the caller can free the ifnet
 * structure that the mbufs may be referencing.
 *
 * NOTE: must be called at splnet or equivalent.
 */
void
if_down(struct ifnet *ifp)
{
	if_unroute(ifp, IFF_UP, AF_UNSPEC);
	netmsg_service_sync();
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_up(struct ifnet *ifp)
{
	if_route(ifp, IFF_UP, AF_UNSPEC);
}

/*
 * Process a link state change.
 * NOTE: must be called at splsoftnet or equivalent.
 */
void
if_link_state_change(struct ifnet *ifp)
{
	int link_state = ifp->if_link_state;

	devctl_notify("IFNET", ifp->if_xname,
		      (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN",
		      NULL);
}
/*
 * Handle interface watchdog timer routines.  Called
 * from softclock, we decrement timers (if set) and
 * call the appropriate interface routine on expiration.
 */
static void
if_slowtimo(void *arg)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_timer == 0 || --ifp->if_timer)
			continue;
		if (ifp->if_watchdog) {
			if (ifnet_tryserialize_all(ifp)) {
				(*ifp->if_watchdog)(ifp);
				ifnet_deserialize_all(ifp);
			} else {
				/* try again next timeout */
				ifp->if_timer = 1;
			}
		}
	}

	callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
}
/*
 * Map interface name to
 * interface structure pointer.
 */
struct ifnet *
ifunit(const char *name)
{
	struct ifnet *ifp;

	/*
	 * Search all the interfaces for this name/number
	 */
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
			break;
	}
	return (ifp);
}

/*
 * Map interface name in a sockaddr_dl to
 * interface structure pointer.
 */
struct ifnet *
if_withname(struct sockaddr *sa)
{
	char ifname[IFNAMSIZ+1];
	struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;

	if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
	     (sdl->sdl_nlen > IFNAMSIZ) )
		return (NULL);

	/*
	 * ifunit wants a null-terminated name.  It may not be null-terminated
	 * in the sockaddr.  We don't want to change the caller's sockaddr,
	 * and there might not be room to put the trailing null anyway, so we
	 * make a local copy that we know we can null terminate safely.
	 */
	bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
	ifname[sdl->sdl_nlen] = '\0';
	return ifunit(ifname);
}
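
/*
 * Example (illustrative only): the ioctl handlers below resolve the
 * user-supplied name first, e.g.
 *
 *	struct ifnet *ifp = ifunit("lo0");
 *	if (ifp == NULL)
 *		return (ENXIO);
 */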
int
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifnet *ifp;
	struct ifreq *ifr;
	struct ifstat *ifs;
	int error;
	int oif_flags;
	int new_flags;
	size_t namelen, onamelen;
	char new_name[IFNAMSIZ];
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	switch (cmd) {
	case SIOCGIFCONF:
	case OSIOCGIFCONF:
		return (ifconf(cmd, data, cred));
	}
	ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCIFCREATE:
	case SIOCIFDESTROY:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return ((cmd == SIOCIFCREATE) ?
			if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name)) :
			if_clone_destroy(ifr->ifr_name));

	case SIOCIFGCLONERS:
		return (if_clone_list((struct if_clonereq *)data));
	}
	ifp = ifunit(ifr->ifr_name);
	if (ifp == NULL)
		return (ENXIO);

	switch (cmd) {
	case SIOCGIFINDEX:
		ifr->ifr_index = ifp->if_index;
		break;

	case SIOCGIFFLAGS:
		ifr->ifr_flags = ifp->if_flags;
		ifr->ifr_flagshigh = ifp->if_flags >> 16;
		break;

	case SIOCGIFCAP:
		ifr->ifr_reqcap = ifp->if_capabilities;
		ifr->ifr_curcap = ifp->if_capenable;
		break;

	case SIOCGIFMETRIC:
		ifr->ifr_metric = ifp->if_metric;
		break;

	case SIOCGIFMTU:
		ifr->ifr_mtu = ifp->if_mtu;
		break;

	case SIOCGIFPHYS:
		ifr->ifr_phys = ifp->if_physical;
		break;

	case SIOCGIFPOLLCPU:
#ifdef DEVICE_POLLING
		ifr->ifr_pollcpu = ifp->if_poll_cpuid;
#else
		ifr->ifr_pollcpu = -1;
#endif
		break;

	case SIOCSIFPOLLCPU:
#ifdef DEVICE_POLLING
		if ((ifp->if_flags & IFF_POLLING) == 0)
			ether_pollcpu_register(ifp, ifr->ifr_pollcpu);
#endif
		break;
	case SIOCSIFFLAGS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		new_flags = (ifr->ifr_flags & 0xffff) |
			    (ifr->ifr_flagshigh << 16);
		if (ifp->if_flags & IFF_SMART) {
			/* Smart drivers twiddle their own routes */
		} else if (ifp->if_flags & IFF_UP &&
			   (new_flags & IFF_UP) == 0) {
			if_down(ifp);
		} else if (new_flags & IFF_UP &&
			   (ifp->if_flags & IFF_UP) == 0) {
			if_up(ifp);
		}

#ifdef DEVICE_POLLING
		if ((new_flags ^ ifp->if_flags) & IFF_POLLING) {
			if (new_flags & IFF_POLLING) {
				ether_poll_register(ifp);
			} else {
				ether_poll_deregister(ifp);
			}
		}
#endif
#ifdef IFPOLL_ENABLE
		if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
			if (new_flags & IFF_NPOLLING)
				ifpoll_register(ifp);
			else
				ifpoll_deregister(ifp);
		}
#endif

		ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
				(new_flags &~ IFF_CANTCHANGE);
		if (new_flags & IFF_PPROMISC) {
			/* Permanently promiscuous mode requested */
			ifp->if_flags |= IFF_PROMISC;
		} else if (ifp->if_pcount == 0) {
			ifp->if_flags &= ~IFF_PROMISC;
		}
		if (ifp->if_ioctl) {
			ifnet_serialize_all(ifp);
			ifp->if_ioctl(ifp, cmd, data, cred);
			ifnet_deserialize_all(ifp);
		}
		getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFCAP:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifr->ifr_reqcap & ~ifp->if_capabilities)
			return (EINVAL);
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;
	case SIOCSIFNAME:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
		if (error)
			return (error);
		if (new_name[0] == '\0')
			return (EINVAL);
		if (ifunit(new_name) != NULL)
			return (EEXIST);

		EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);

		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		/* XXX IFA_LOCK(ifa); */
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		namelen = strlen(new_name);
		onamelen = sdl->sdl_nlen;
		/*
		 * Move the address if needed.  This is safe because we
		 * allocate space for a name of length IFNAMSIZ when we
		 * create this in if_attach().
		 */
		if (namelen != onamelen) {
			bcopy(sdl->sdl_data + onamelen,
			      sdl->sdl_data + namelen, sdl->sdl_alen);
		}
		bcopy(new_name, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
		bzero(sdl->sdl_data, onamelen);
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;
		/* XXX IFA_UNLOCK(ifa) */

		EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);

		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
		break;
	case SIOCSIFMETRIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		ifp->if_metric = ifr->ifr_metric;
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFMTU:
	{
		u_long oldmtu = ifp->if_mtu;

		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
			return (EINVAL);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		/*
		 * If the link MTU changed, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);

		/* Don't allow group membership on non-multicast interfaces. */
		if ((ifp->if_flags & IFF_MULTICAST) == 0)
			return (EOPNOTSUPP);

		/* Don't let users screw up protocols' entries. */
		if (ifr->ifr_addr.sa_family != AF_LINK)
			return (EINVAL);

		if (cmd == SIOCADDMULTI) {
			struct ifmultiaddr *ifma;
			error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
		} else {
			error = if_delmulti(ifp, &ifr->ifr_addr);
		}
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;
	case SIOCSIFPHYADDR:
	case SIOCDIFPHYADDR:
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
#endif
	case SIOCSLIFPHYADDR:
	case SIOCSIFGENERIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifp->if_ioctl == 0)
			return (EOPNOTSUPP);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		ifs->ascii[0] = '\0';
		/* fall through */
	case SIOCGIFPSRCADDR:
	case SIOCGIFPDSTADDR:
	case SIOCGLIFPHYADDR:
	case SIOCGIFGENERIC:
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;

	case SIOCSIFLLADDR:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		return if_setlladdr(ifp,
				    ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
	default:
		oif_flags = ifp->if_flags;
		if (so->so_proto == 0)
			return (EOPNOTSUPP);
#ifndef COMPAT_43
		error = so_pru_control(so, cmd, data, ifp);
#else
	    {
		int ocmd = cmd;

		switch (cmd) {
		case SIOCSIFDSTADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
#if BYTE_ORDER != BIG_ENDIAN
			if (ifr->ifr_addr.sa_family == 0 &&
			    ifr->ifr_addr.sa_len < 16) {
				ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
				ifr->ifr_addr.sa_len = 16;
			}
#else
			if (ifr->ifr_addr.sa_len == 0)
				ifr->ifr_addr.sa_len = 16;
#endif
			break;
		case OSIOCGIFDSTADDR:
			cmd = SIOCGIFDSTADDR;
			break;
		case OSIOCGIFBRDADDR:
			cmd = SIOCGIFBRDADDR;
			break;
		case OSIOCGIFNETMASK:
			cmd = SIOCGIFNETMASK;
			break;
		}

		error = so_pru_control(so, cmd, data, ifp);

		switch (ocmd) {
		case OSIOCGIFDSTADDR:
		case OSIOCGIFBRDADDR:
		case OSIOCGIFNETMASK:
			*(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
			break;
		}
	    }
#endif /* COMPAT_43 */

		if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
#ifdef INET6
			DELAY(100);/* XXX: temporary workaround for fxp issue*/
			if (ifp->if_flags & IFF_UP)
				in6_if_up(ifp);
#endif
		}
		break;
	}
	return (error);
}
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(struct ifnet *ifp, int pswitch)
{
	struct ifreq ifr;
	int error;
	int oldflags;

	oldflags = ifp->if_flags;
	if (ifp->if_flags & IFF_PPROMISC) {
		/* Do nothing if device is in permanently promiscuous mode */
		ifp->if_pcount += pswitch ? 1 : -1;
		return (0);
	}
	if (pswitch) {
		/*
		 * If the device is not configured up, we cannot put it in
		 * promiscuous mode.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode enabled\n",
		    ifp->if_xname);
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode disabled\n",
		    ifp->if_xname);
	}
	ifr.ifr_flags = ifp->if_flags;
	ifr.ifr_flagshigh = ifp->if_flags >> 16;
	ifnet_serialize_all(ifp);
	error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
	ifnet_deserialize_all(ifp);
	if (error)
		ifp->if_flags = oldflags;
	return error;
}
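
/*
 * Usage note (illustrative only): packet taps such as bpf bracket their
 * lifetime with matched on/off calls, e.g.
 *
 *	error = ifpromisc(ifp, 1);	// listener attaches
 *	...
 *	error = ifpromisc(ifp, 0);	// listener detaches
 *
 * Only the first "on" and the last "off" actually issue SIOCSIFFLAGS to
 * the driver; the intermediate calls just adjust if_pcount.
 */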
/*
 * Return interface configuration
 * of system.  List may be used
 * in later ioctl's (above) to get
 * other information.
 */
static int
ifconf(u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifconf *ifc = (struct ifconf *)data;
	struct ifnet *ifp;
	struct sockaddr *sa;
	struct ifreq ifr, *ifrp;
	int space = ifc->ifc_len, error = 0;

	ifrp = ifc->ifc_req;
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;
		int addrs;

		if (space <= sizeof ifr)
			break;

		/*
		 * Zero the stack declared structure first to prevent
		 * memory disclosure.
		 */
		bzero(&ifr, sizeof(ifr));
		if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
		    >= sizeof(ifr.ifr_name)) {
			error = ENAMETOOLONG;
			break;
		}

		addrs = 0;
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (space <= sizeof ifr)
				break;
			sa = ifa->ifa_addr;
			if (cred->cr_prison &&
			    prison_if(cred, sa))
				continue;
			addrs++;
			if (cmd == OSIOCGIFCONF) {
				struct osockaddr *osa =
					 (struct osockaddr *)&ifr.ifr_addr;

				ifr.ifr_addr = *sa;
				osa->sa_family = sa->sa_family;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else if (sa->sa_len <= sizeof(*sa)) {
				ifr.ifr_addr = *sa;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else {
				if (space < (sizeof ifr) + sa->sa_len -
					    sizeof(*sa))
					break;
				space -= sa->sa_len - sizeof(*sa);
				error = copyout(&ifr, ifrp,
						sizeof ifr.ifr_name);
				if (error == 0)
					error = copyout(sa, &ifrp->ifr_addr,
							sa->sa_len);
				ifrp = (struct ifreq *)
					(sa->sa_len + (caddr_t)&ifrp->ifr_addr);
			}
			if (error)
				break;
			space -= sizeof ifr;
		}
		if (error)
			break;
		if (addrs == 0) {
			bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
			error = copyout(&ifr, ifrp, sizeof ifr);
			if (error)
				break;
			space -= sizeof ifr;
			ifrp++;
		}
	}
	ifc->ifc_len -= space;
	return (error);
}
/*
 * Just like if_promisc(), but for all-multicast-reception mode.
 */
int
if_allmulti(struct ifnet *ifp, int onswitch)
{
	int error = 0;
	struct ifreq ifr;

	if (onswitch) {
		if (ifp->if_amcount++ == 0) {
			ifp->if_flags |= IFF_ALLMULTI;
			ifr.ifr_flags = ifp->if_flags;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
					      NULL);
			ifnet_deserialize_all(ifp);
		}
	} else {
		if (ifp->if_amcount > 1) {
			ifp->if_amcount--;
		} else {
			ifp->if_amcount = 0;
			ifp->if_flags &= ~IFF_ALLMULTI;
			ifr.ifr_flags = ifp->if_flags;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
					      NULL);
			ifnet_deserialize_all(ifp);
		}
	}
	return error;
}
/*
 * Add a multicast listenership to the interface in question.
 * The link layer provides a routine which converts
 * network-level addresses to link-level addresses.
 */
int
if_addmulti(
	struct ifnet *ifp,	/* interface to manipulate */
	struct sockaddr *sa,	/* address to add */
	struct ifmultiaddr **retifma)
{
	struct sockaddr *llsa, *dupsa;
	int error;
	struct ifmultiaddr *ifma;

	/*
	 * If the matching multicast address already exists
	 * then don't add a new one, just add a reference
	 * count.
	 */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (sa_equal(sa, ifma->ifma_addr)) {
			ifma->ifma_refcount++;
			if (retifma)
				*retifma = ifma;
			return 0;
		}
	}

	/*
	 * Give the link layer a chance to accept/reject it, and also
	 * find out which AF_LINK address this maps to, if it isn't one
	 * already.
	 */
	if (ifp->if_resolvemulti) {
		ifnet_serialize_all(ifp);
		error = ifp->if_resolvemulti(ifp, &llsa, sa);
		ifnet_deserialize_all(ifp);
		if (error)
			return error;
	} else {
		llsa = 0;
	}

	MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, M_IFMADDR, M_WAITOK);
	MALLOC(dupsa, struct sockaddr *, sa->sa_len, M_IFMADDR, M_WAITOK);
	bcopy(sa, dupsa, sa->sa_len);

	ifma->ifma_addr = dupsa;
	ifma->ifma_lladdr = llsa;
	ifma->ifma_ifp = ifp;
	ifma->ifma_refcount = 1;
	ifma->ifma_protospec = 0;
	rt_newmaddrmsg(RTM_NEWMADDR, ifma);

	/*
	 * Some network interfaces can scan the address list at
	 * interrupt time; lock them out.
	 */
	LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
	if (retifma)
		*retifma = ifma;

	if (llsa != 0) {
		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (sa_equal(ifma->ifma_addr, llsa))
				break;
		}
		if (ifma) {
			ifma->ifma_refcount++;
		} else {
			MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma,
			       M_IFMADDR, M_WAITOK);
			MALLOC(dupsa, struct sockaddr *, llsa->sa_len,
			       M_IFMADDR, M_WAITOK);
			bcopy(llsa, dupsa, llsa->sa_len);
			ifma->ifma_addr = dupsa;
			ifma->ifma_ifp = ifp;
			ifma->ifma_refcount = 1;
			LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
		}
	}
	/*
	 * We are certain we have added something, so call down to the
	 * interface to let them know about it.
	 */
	ifnet_serialize_all(ifp);
	ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
	ifnet_deserialize_all(ifp);

	return 0;
}
/*
 * Remove a reference to a multicast address on this interface.  Yell
 * if the request does not match an existing membership.
 */
int
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return ENOENT;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	rt_newmaddrmsg(RTM_DELMADDR, ifma);
	sa = ifma->ifma_lladdr;
	LIST_REMOVE(ifma, ifma_link);
	/*
	 * Make sure the interface driver is notified
	 * in the case of a link layer mcast group being left.
	 */
	if (ifma->ifma_addr->sa_family == AF_LINK && sa == 0) {
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
		ifnet_deserialize_all(ifp);
	}
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(ifma, M_IFMADDR);
	if (sa == 0)
		return 0;

	/*
	 * Now look for the link-layer address which corresponds to
	 * this network address.  It had been squirreled away in
	 * ifma->ifma_lladdr for this purpose (so we don't have
	 * to call ifp->if_resolvemulti() again), and we saved that
	 * value in sa above.  If some nasty deleted the
	 * link-layer address out from underneath us, we can deal because
	 * the address we stored is not the same as the one which was
	 * in the record for the link-layer address.  (So we don't complain
	 * in that case.)
	 */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == NULL)
		return 0;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	ifnet_serialize_all(ifp);
	LIST_REMOVE(ifma, ifma_link);
	ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	ifnet_deserialize_all(ifp);
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(sa, M_IFMADDR);
	kfree(ifma, M_IFMADDR);

	return 0;
}
/*
 * Set the link layer address on an interface.
 *
 * At this time we only support certain types of interfaces,
 * and we don't allow the length of the address to change.
 */
int
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
	struct sockaddr_dl *sdl;
	struct ifreq ifr;

	sdl = IF_LLSOCKADDR(ifp);
	if (sdl == NULL)
		return (EINVAL);
	if (len != sdl->sdl_alen)	/* don't allow length to change */
		return (EINVAL);
	switch (ifp->if_type) {
	case IFT_ETHER:			/* these types use struct arpcom */
		bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
		bcopy(lladdr, LLADDR(sdl), len);
		break;
	default:
		return (ENODEV);
	}
	/*
	 * If the interface is already up, we need
	 * to re-init it in order to reprogram its
	 * address filter.
	 */
	ifnet_serialize_all(ifp);
	if ((ifp->if_flags & IFF_UP) != 0) {
		struct ifaddr_container *ifac;

		ifp->if_flags &= ~IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
			      NULL);
		ifp->if_flags |= IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
			      NULL);
		/*
		 * Also send gratuitous ARPs to notify other nodes about
		 * the address change.
		 */
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr != NULL &&
			    ifa->ifa_addr->sa_family == AF_INET)
				arp_ifinit(ifp, ifa);
		}
	}
	ifnet_deserialize_all(ifp);
	return (0);
}
struct ifmultiaddr *
ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
{
	struct ifmultiaddr *ifma;

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(ifma->ifma_addr, sa))
			break;

	return ifma;
}
/*
 * This function locates the first real ethernet MAC from a network
 * card and loads it into node, returning 0 on success or ENOENT if
 * no suitable interfaces were found.  It is used by the uuid code to
 * generate a unique 6-byte number.
 */
int
if_getanyethermac(uint16_t *node, int minlen)
{
	struct ifnet *ifp;
	struct sockaddr_dl *sdl;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_type != IFT_ETHER)
			continue;
		sdl = IF_LLSOCKADDR(ifp);
		if (sdl->sdl_alen < minlen)
			continue;
		bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
		      minlen);
		return(0);
	}
	return (ENOENT);
}
/*
 * The name argument must be a pointer to storage which will last as
 * long as the interface does.  For physical devices, the result of
 * device_get_name(dev) is a good choice and for pseudo-devices a
 * static string works well.
 */
void
if_initname(struct ifnet *ifp, const char *name, int unit)
{
	ifp->if_dname = name;
	ifp->if_dunit = unit;
	if (unit != IF_DUNIT_NONE)
		ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
	else
		strlcpy(ifp->if_xname, name, IFNAMSIZ);
}

int
if_printf(struct ifnet *ifp, const char *fmt, ...)
{
	__va_list ap;
	int retval;

	retval = kprintf("%s: ", ifp->if_xname);
	__va_start(ap, fmt);
	retval += kvprintf(fmt, ap);
	__va_end(ap);
	return (retval);
}
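
/*
 * Example (illustrative): if_printf() prefixes the interface name, so
 *
 *	if_printf(ifp, "watchdog timeout, resetting\n");
 *
 * logs something like "fxp0: watchdog timeout, resetting".
 */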
void
ifq_set_classic(struct ifaltq *ifq)
{
	ifq->altq_enqueue = ifq_classic_enqueue;
	ifq->altq_dequeue = ifq_classic_dequeue;
	ifq->altq_request = ifq_classic_request;
}

int
ifq_classic_enqueue(struct ifaltq *ifq, struct mbuf *m,
		    struct altq_pktattr *pa __unused)
{
	logifq(enqueue, ifq);
	if (IF_QFULL(ifq)) {
		m_freem(m);
		return (ENOBUFS);
	} else {
		IF_ENQUEUE(ifq, m);
		return (0);
	}
}

struct mbuf *
ifq_classic_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
{
	struct mbuf *m;

	switch (op) {
	case ALTDQ_POLL:
		IF_POLL(ifq, m);
		break;
	case ALTDQ_REMOVE:
		logifq(dequeue, ifq);
		IF_DEQUEUE(ifq, m);
		break;
	default:
		panic("unsupported ALTQ dequeue op: %d", op);
	}
	KKASSERT(mpolled == NULL || mpolled == m);
	return (m);
}

int
ifq_classic_request(struct ifaltq *ifq, int req, void *arg)
{
	switch (req) {
	case ALTRQ_PURGE:
		IF_DRAIN(ifq);
		break;
	default:
		panic("unsupported ALTQ request: %d", req);
	}
	return (0);
}
void
ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
{
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0, error, start = 0;

	ASSERT_IFNET_NOT_SERIALIZED_TX(ifp);

	error = ifq_enqueue_locked(ifq, m, pa);
	if (error)
		return;

	if (!ifq->altq_started) {
		/*
		 * Hold the interlock of ifnet.if_start
		 */
		ifq->altq_started = 1;
		start = 1;
	}

	ifp->if_obytes += m->m_pkthdr.len;
	if (m->m_flags & M_MCAST)
		ifp->if_omcasts++;

	if (!start) {
		logifstart(avoid, ifp);
		return;
	}

	if (ifq_dispatch_schedonly) {
		/*
		 * Always schedule ifnet.if_start on ifnet's CPU,
		 * short circuit the rest of this function.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
		return;
	}

	/*
	 * Try to do direct ifnet.if_start first, if there is
	 * contention on ifnet's serializer, ifnet.if_start will
	 * be scheduled on ifnet's CPU.
	 */
	if (!ifnet_tryserialize_tx(ifp)) {
		/*
		 * ifnet serializer contention happened,
		 * ifnet.if_start is scheduled on ifnet's
		 * CPU, and we keep going.
		 */
		logifstart(contend_sched, ifp);
		if_start_schedule(ifp);
		return;
	}

	if ((ifp->if_flags & IFF_OACTIVE) == 0) {
		logifstart(run, ifp);
		ifp->if_start(ifp);
		if ((ifp->if_flags &
		     (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
			running = 1;
	}
	ifnet_deserialize_tx(ifp);

	if (ifq_dispatch_schednochk || if_start_need_schedule(ifq, running)) {
		/*
		 * More data needs to be transmitted, ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
	}
}
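
/*
 * Summary: ifq_dispatch() is the normal transmit-path entry.  It enqueues
 * the mbuf, takes the if_start interlock (altq_started) if nobody holds
 * it, and then either runs if_start directly under the TX serializer or
 * schedules it on the interface's CPU, depending on serializer contention
 * and the sysctl knobs defined near the top of this file.
 */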
struct ifaddr *
ifa_create(int size, int flags)
{
	struct ifaddr *ifa;
	int i;

	KASSERT(size >= sizeof(*ifa), ("ifaddr size too small\n"));

	ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);

	ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
				      M_IFADDR, M_WAITOK | M_ZERO);
	ifa->ifa_ncnt = ncpus;
	for (i = 0; i < ncpus; ++i) {
		struct ifaddr_container *ifac = &ifa->ifa_containers[i];

		ifac->ifa_magic = IFA_CONTAINER_MAGIC;
		ifac->ifa = ifa;
		ifac->ifa_refcnt = 1;
	}
#ifdef IFADDR_DEBUG
	kprintf("alloc ifa %p %d\n", ifa, size);
#endif
	return ifa;
}

void
ifac_free(struct ifaddr_container *ifac, int cpu_id)
{
	struct ifaddr *ifa = ifac->ifa;

	KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
	KKASSERT(ifac->ifa_refcnt == 0);
	KASSERT(ifac->ifa_listmask == 0,
		("ifa is still on %#x lists\n", ifac->ifa_listmask));

	ifac->ifa_magic = IFA_CONTAINER_DEAD;

#ifdef IFADDR_DEBUG_VERBOSE
	kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
#endif

	KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
		("invalid # of ifac, %d\n", ifa->ifa_ncnt));
	if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
#ifdef IFADDR_DEBUG
		kprintf("free ifa %p\n", ifa);
#endif
		kfree(ifa->ifa_containers, M_IFADDR);
		kfree(ifa, M_IFADDR);
	}
}
static void
ifa_iflink_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
	struct ifaddr *ifa = msg->ifa;
	struct ifnet *ifp = msg->ifp;
	int cpu = mycpuid;
	struct ifaddr_container *ifac;

	ifac = &ifa->ifa_containers[cpu];
	ASSERT_IFAC_VALID(ifac);
	KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
		("ifaddr is on if_addrheads\n"));

	ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
	if (msg->tail)
		TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
	else
		TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);

	ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
}

void
ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.netmsg, &curthread->td_msgport, 0,
		    ifa_iflink_dispatch);
	msg.ifa = ifa;
	msg.ifp = ifp;
	msg.tail = tail;

	ifa_domsg(&msg.netmsg.nm_lmsg, 0);
}
static void
ifa_ifunlink_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
	struct ifaddr *ifa = msg->ifa;
	struct ifnet *ifp = msg->ifp;
	int cpu = mycpuid;
	struct ifaddr_container *ifac;

	ifac = &ifa->ifa_containers[cpu];
	ASSERT_IFAC_VALID(ifac);
	KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
		("ifaddr is not on if_addrhead\n"));

	TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
	ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;

	ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
}

void
ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.netmsg, &curthread->td_msgport, 0,
		    ifa_ifunlink_dispatch);
	msg.ifa = ifa;
	msg.ifp = ifp;

	ifa_domsg(&msg.netmsg.nm_lmsg, 0);
}
static void
ifa_destroy_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;

	IFAFREE(msg->ifa);
	ifa_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

void
ifa_destroy(struct ifaddr *ifa)
{
	struct netmsg_ifaddr msg;

	netmsg_init(&msg.netmsg, &curthread->td_msgport, 0,
		    ifa_destroy_dispatch);
	msg.ifa = ifa;

	ifa_domsg(&msg.netmsg.nm_lmsg, 0);
}

lwkt_port_t
ifnet_portfn(int cpu)
{
	return &ifnet_threads[cpu].td_msgport;
}

void
ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
{
	KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);

	if (next_cpu < ncpus)
		lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
	else
		lwkt_replymsg(lmsg, 0);
}

int
ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
{
	KKASSERT(cpu < ncpus);
	return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
}

void
ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
{
	KKASSERT(cpu < ncpus);
	lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
}
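
/*
 * Each CPU gets a dedicated "ifnet %d" service thread (created in
 * ifnetinit() below); ifnet_portfn() returns that CPU's message port,
 * and the ifa_* message handlers above walk the CPUs by forwarding the
 * same message from port to port until the forwarding helper finally
 * replies on the last CPU.
 */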
static void
ifnetinit(void *dummy __unused)
{
	int i;

	for (i = 0; i < ncpus; ++i) {
		struct thread *thr = &ifnet_threads[i];

		lwkt_create(netmsg_service_loop, &ifnet_mpsafe_thread, NULL,
			    thr, TDF_NETWORK | TDF_MPSAFE, i, "ifnet %d", i);
		netmsg_service_port_init(&thr->td_msgport);
	}
}