 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/rwlock.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/sunddi.h>
#include <sys/sdt.h>
#include <sys/neti.h>
#include <inet/common.h>

typedef boolean_t napplyfn_t(neti_stack_t *, void *);

static void *neti_stack_init(netstackid_t stackid, netstack_t *ns);
static void neti_stack_fini(netstackid_t stackid, void *arg);
static net_instance_int_t *net_instance_int_create(net_instance_t *nin,
    net_instance_int_t *parent);
static void neti_stack_shutdown(netstackid_t stackid, void *arg);
static void net_instance_int_free(net_instance_int_t *nini);

static boolean_t neti_stack_apply_create(neti_stack_t *, void *);
static boolean_t neti_stack_apply_destroy(neti_stack_t *, void *);
static boolean_t neti_stack_apply_shutdown(neti_stack_t *, void *);
static void neti_apply_all_instances(neti_stack_t *, napplyfn_t *);
static void neti_apply_all_stacks(void *, napplyfn_t *);
static boolean_t wait_for_nini_inprogress(neti_stack_t *,
    net_instance_int_t *, uint32_t);

static nini_head_t neti_instance_list;
static neti_stack_head_t neti_stack_list;
static kmutex_t neti_stack_lock;

void
neti_init()
{
	mutex_init(&neti_stack_lock, NULL, MUTEX_DRIVER, NULL);

	LIST_INIT(&neti_instance_list);
	LIST_INIT(&neti_stack_list);

	/*
	 * We want to be informed each time a netstack is created or
	 * destroyed in the kernel.
	 */
	netstack_register(NS_NETI, neti_stack_init, neti_stack_shutdown,
	    neti_stack_fini);
}

void
neti_fini()
{
	ASSERT(LIST_EMPTY(&neti_instance_list));
	ASSERT(LIST_EMPTY(&neti_stack_list));

	netstack_unregister(NS_NETI);

	mutex_destroy(&neti_stack_lock);
}

/*
 * Initialize the neti stack instance.  Because this is called out of the
 * netstack framework, it is not possible for it to be called twice with
 * the same values for (stackid, ns).  The same also applies to the other
 * two functions used with netstack_register: neti_stack_shutdown and
 * neti_stack_fini.
 */
static void *
neti_stack_init(netstackid_t stackid, netstack_t *ns)
{
	net_instance_int_t *dup;
	net_instance_int_t *n;
	neti_stack_t *nts;

	nts = kmem_zalloc(sizeof (*nts), KM_SLEEP);
	LIST_INIT(&nts->nts_instances);
	nts->nts_id = (netid_t)stackid;
	nts->nts_stackid = stackid;
	nts->nts_netstack = ns;
	nts->nts_zoneid = netstackid_to_zoneid(stackid);
	nts->nts_flags = NSF_ZONE_CREATE;
	cv_init(&nts->nts_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&nts->nts_lock, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&neti_stack_lock);
	LIST_INSERT_HEAD(&neti_stack_list, nts, nts_next);

	LIST_FOREACH(n, &neti_instance_list, nini_next) {
		/*
		 * net_instance_int_create() returns with the
		 * NSS_CREATE_NEEDED flag set in "dup", so it is adequately
		 * prepared for the create callback that follows.
		 */
		dup = net_instance_int_create(n->nini_instance, n);

		mutex_enter(&nts->nts_lock);
		LIST_INSERT_HEAD(&nts->nts_instances, dup, nini_next);
		mutex_exit(&nts->nts_lock);
	}

	neti_apply_all_instances(nts, neti_stack_apply_create);

	mutex_enter(&nts->nts_lock);
	nts->nts_flags &= ~NSF_ZONE_CREATE;
	mutex_exit(&nts->nts_lock);

	mutex_exit(&neti_stack_lock);

	return (nts);
}

/*
 * Run the shutdown for all of the hooks.
 */
static void
neti_stack_shutdown(netstackid_t stackid, void *arg)
{
	neti_stack_t *nts = arg;
	net_instance_int_t *n;
	struct net_data *nd;

	mutex_enter(&neti_stack_lock);
	mutex_enter(&nts->nts_lock);
	/*
	 * Walk through all of the protocol stacks and mark them as shutting
	 * down.
	 */
	LIST_FOREACH(nd, &nts->nts_netd_head, netd_list) {
		nd->netd_condemned = 1;
	}

	/*
	 * Now proceed to see which callbacks are waiting to hear about the
	 * impending shutdown...
	 */
	LIST_FOREACH(n, &nts->nts_instances, nini_next) {
		if (n->nini_instance->nin_shutdown == NULL) {
			/*
			 * If there is no shutdown function registered,
			 * fake that we have completed it.
			 */
			n->nini_flags |= NSS_SHUTDOWN_COMPLETED;
			continue;
		}

		/*
		 * We need to ensure that we don't try and shutdown something
		 * that is already in the process of being shutdown or
		 * destroyed.  If it is still being created, that's ok: the
		 * shutdown flag is added to the mix of things to do.
		 */
		if ((n->nini_flags & (NSS_DESTROY_ALL|NSS_SHUTDOWN_ALL)) == 0)
			n->nini_flags |= NSS_SHUTDOWN_NEEDED;
	}
	nts->nts_flags |= NSF_ZONE_SHUTDOWN;
	mutex_exit(&nts->nts_lock);

	neti_apply_all_instances(nts, neti_stack_apply_shutdown);

	mutex_enter(&nts->nts_lock);

	nts->nts_netstack = NULL;
	nts->nts_flags &= ~NSF_ZONE_SHUTDOWN;
	mutex_exit(&nts->nts_lock);

	mutex_exit(&neti_stack_lock);
}

/*
 * Free the neti stack instance.
 * This function relies on the netstack framework only calling the _destroy
 * callback once for each stackid.  The netstack framework also provides us
 * with assurance that nobody else will be doing any work (_create, _shutdown)
 * on it, so there is no need to set and use flags to guard against
 * simultaneous execution (ie. no need to set NSF_CLOSING.)
 * What is required, however, is to make sure that we don't corrupt the
 * list of neti_stack_t's for other code that walks it.
 */
static void
neti_stack_fini(netstackid_t stackid, void *arg)
{
	neti_stack_t *nts = arg;
	net_instance_int_t *n;
	struct net_data *nd;

	mutex_enter(&neti_stack_lock);
	mutex_enter(&nts->nts_lock);

	LIST_REMOVE(nts, nts_next);

	/*
	 * Walk through all of the protocol stacks and mark them as being
	 * destroyed.
	 */
	LIST_FOREACH(nd, &nts->nts_netd_head, netd_list) {
		nd->netd_condemned = 2;
	}

	LIST_FOREACH(n, &nts->nts_instances, nini_next) {
		ASSERT((n->nini_flags & NSS_SHUTDOWN_ALL) != 0);
		if ((n->nini_flags & NSS_DESTROY_ALL) == 0)
			n->nini_flags |= NSS_DESTROY_NEEDED;
	}
	mutex_exit(&nts->nts_lock);

	neti_apply_all_instances(nts, neti_stack_apply_destroy);

	while (!LIST_EMPTY(&nts->nts_instances)) {
		n = LIST_FIRST(&nts->nts_instances);
		LIST_REMOVE(n, nini_next);

		net_instance_int_free(n);
	}

	mutex_exit(&neti_stack_lock);

	ASSERT(LIST_EMPTY(&nts->nts_netd_head));

	mutex_destroy(&nts->nts_lock);
	cv_destroy(&nts->nts_cv);

	kmem_free(nts, sizeof (*nts));
}

static net_instance_int_t *
net_instance_int_create(net_instance_t *nin, net_instance_int_t *parent)
{
	net_instance_int_t *nini;

	nini = kmem_zalloc(sizeof (net_instance_int_t), KM_SLEEP);
	nini->nini_instance = nin;
	nini->nini_parent = parent;
	if (parent != NULL) {
		/*
		 * If the parent pointer is non-NULL then we take that as
		 * an indication that the net_instance_int_t is being
		 * created for an active instance and that the create
		 * function is expected to be called.  In contrast, if
		 * parent is NULL then this code assumes the object is
		 * being prepared for insertion onto the master list of
		 * callbacks to be called when an instance is created, etc.
		 */
		parent->nini_ref++;
		nini->nini_flags |= NSS_CREATE_NEEDED;
	}

	cv_init(&nini->nini_cv, NULL, CV_DRIVER, NULL);

	return (nini);
}

/*
 * Freeing of a net_instance_int_t is only to be done when we know nobody
 * else is using it.  For both parents and clones, this is indicated by
 * nini_ref being greater than 0; however, nini_ref is managed differently
 * for its two uses.  For parents, nini_ref is increased when a new clone is
 * created and it is decremented here.  For clones, nini_ref is adjusted by
 * code elsewhere (e.g. in neti_stack_apply_*) and is not changed here.
 */
static void
net_instance_int_free(net_instance_int_t *nini)
{
	/*
	 * This mutex guards the use of nini_ref.
	 */
	ASSERT(mutex_owned(&neti_stack_lock));

	/*
	 * For 'parent' structures, nini_ref will drop to 0 when
	 * the last clone has been free'd... but for clones, it
	 * is possible for nini_ref to be non-zero if we get in
	 * here when all the locks have been given up to execute
	 * a callback or wait_for_nini_inprogress.  In that case,
	 * we do not want to free the structure and just indicate
	 * that it is on the "doomed" list, thus we set the
	 * nini_condemned flag.
	 */
	if (nini->nini_parent != NULL) {
		if (nini->nini_ref > 0)
			nini->nini_condemned = B_TRUE;
		nini->nini_parent->nini_ref--;
		if (nini->nini_parent->nini_ref == 0)
			net_instance_int_free(nini->nini_parent);
		nini->nini_parent = NULL;
	}

	if (nini->nini_ref == 0) {
		cv_destroy(&nini->nini_cv);
		kmem_free(nini, sizeof (*nini));
	}
}

net_instance_t *
net_instance_alloc(const int version)
{
	net_instance_t *nin;

	if (version != NETINFO_VERSION)
		return (NULL);

	nin = kmem_zalloc(sizeof (net_instance_t), KM_SLEEP);
	nin->nin_version = version;

	return (nin);
}

void
net_instance_free(net_instance_t *nin)
{
	kmem_free(nin, sizeof (*nin));
}

int
net_instance_register(net_instance_t *nin)
{
	net_instance_int_t *parent;
	net_instance_int_t *tmp;
	neti_stack_t *nts;

	ASSERT(nin->nin_name != NULL);

	if (nin->nin_create == NULL || nin->nin_destroy == NULL)
		return (DDI_FAILURE);

	mutex_enter(&neti_stack_lock);
	/*
	 * Search for duplicate, either on the global list or on any
	 * of the known instances.
	 */
	LIST_FOREACH(tmp, &neti_instance_list, nini_next) {
		if (strcmp(nin->nin_name, tmp->nini_instance->nin_name) == 0) {
			mutex_exit(&neti_stack_lock);
			return (DDI_FAILURE);
		}
	}

	/*
	 * Now insert and activate.
	 */
	parent = net_instance_int_create(nin, NULL);
	ASSERT(parent != NULL);
	LIST_INSERT_HEAD(&neti_instance_list, parent, nini_next);

	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
		mutex_enter(&nts->nts_lock);
		/*
		 * If shutdown of the zone has begun then do not add a new
		 * instance of the object being registered.
		 */
		if ((nts->nts_flags & NSF_ZONE_SHUTDOWN) ||
		    (nts->nts_netstack == NULL)) {
			mutex_exit(&nts->nts_lock);
			continue;
		}

		/*
		 * net_instance_int_create() returns with the
		 * NSS_CREATE_NEEDED flag set in "tmp", so it is adequately
		 * prepared for the create callback that follows.
		 */
		tmp = net_instance_int_create(nin, parent);

		LIST_INSERT_HEAD(&nts->nts_instances, tmp, nini_next);
		mutex_exit(&nts->nts_lock);
	}

	neti_apply_all_stacks(parent, neti_stack_apply_create);
	mutex_exit(&neti_stack_lock);

	return (DDI_SUCCESS);
}

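/*
 * Usage sketch (illustrative only, not part of this module): a consumer
 * such as a hypothetical "example" module would supply per-stack create,
 * shutdown and destroy callbacks and register them roughly as follows.
 * The example_* names and example_stack_t are assumptions; only
 * net_instance_alloc(), net_instance_register() and the nin_* fields used
 * here come from the interfaces above.
 *
 *	typedef struct example_stack {
 *		netid_t	es_netid;
 *	} example_stack_t;
 *
 *	static net_instance_t *example_instance;
 *
 *	static void *
 *	example_create(netid_t id)
 *	{
 *		example_stack_t *es;
 *
 *		es = kmem_zalloc(sizeof (*es), KM_SLEEP);
 *		es->es_netid = id;
 *		return (es);
 *	}
 *
 *	static void
 *	example_shutdown(netid_t id, void *arg)
 *	{
 *		// Quiesce per-stack state here, prior to destroy.
 *	}
 *
 *	static void
 *	example_destroy(netid_t id, void *arg)
 *	{
 *		kmem_free(arg, sizeof (example_stack_t));
 *	}
 *
 *	static int
 *	example_attach(void)
 *	{
 *		example_instance = net_instance_alloc(NETINFO_VERSION);
 *		if (example_instance == NULL)
 *			return (DDI_FAILURE);
 *		example_instance->nin_name = "example";
 *		example_instance->nin_create = example_create;
 *		example_instance->nin_shutdown = example_shutdown;
 *		example_instance->nin_destroy = example_destroy;
 *		return (net_instance_register(example_instance));
 *	}
 */
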
/*
 * While net_instance_register() isn't likely to be racing against itself,
 * net_instance_unregister() can be entered from various directions that
 * can compete: shutdown of a zone, unloading of a module (and it calling
 * _unregister() as part of that) and the module doing an _unregister()
 * of its own accord.
 */
int
net_instance_unregister(net_instance_t *nin)
{
	net_instance_int_t *parent;
	net_instance_int_t *tmp;
	neti_stack_t *nts;

	mutex_enter(&neti_stack_lock);

	LIST_FOREACH(tmp, &neti_instance_list, nini_next) {
		if (strcmp(tmp->nini_instance->nin_name, nin->nin_name) == 0) {
			LIST_REMOVE(tmp, nini_next);
			break;
		}
	}

	if (tmp == NULL) {
		mutex_exit(&neti_stack_lock);
		return (DDI_FAILURE);
	}
	parent = tmp;

	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
		mutex_enter(&nts->nts_lock);
		LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
			if (tmp->nini_parent != parent)
				continue;
			/*
			 * Netstack difference:
			 * In netstack.c, there is a check for
			 * NSS_CREATE_COMPLETED before setting the other
			 * _NEEDED flags.  Here, a list member always has
			 * at least the _CREATE_NEEDED flag set, and
			 * wait_for_nini_inprogress will also wait for that
			 * flag to be cleared in both the shutdown and
			 * destroy apply functions, so the check is not
			 * needed.
			 *
			 * It is possible to optimize out the case where
			 * all three _NEEDED flags are set by pretending
			 * everything has been done and just setting all
			 * three _COMPLETED flags.  That would create a
			 * special case that we then need to consider in
			 * other locations, so for the sake of simplicity,
			 * we leave it as it is.
			 */
			if ((tmp->nini_flags & NSS_SHUTDOWN_ALL) == 0)
				tmp->nini_flags |= NSS_SHUTDOWN_NEEDED;
			if ((tmp->nini_flags & NSS_DESTROY_ALL) == 0)
				tmp->nini_flags |= NSS_DESTROY_NEEDED;
		}
		mutex_exit(&nts->nts_lock);
	}

	/*
	 * Each of these functions ensures that the requisite _COMPLETED
	 * flag is present before calling the apply function.  So we are
	 * guaranteed to have NSS_CREATE_COMPLETED|NSS_SHUTDOWN_COMPLETED
	 * both set after the first call here and when the second completes,
	 * NSS_DESTROY_COMPLETED is also set.
	 */
	neti_apply_all_stacks(parent, neti_stack_apply_shutdown);
	neti_apply_all_stacks(parent, neti_stack_apply_destroy);

	/*
	 * Remove the instance callback information from each stack.
	 */
	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
		mutex_enter(&nts->nts_lock);
		LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
			if ((tmp->nini_parent == parent) &&
			    (tmp->nini_flags & NSS_SHUTDOWN_COMPLETED) &&
			    (tmp->nini_flags & NSS_DESTROY_COMPLETED)) {
				/*
				 * There should only be one entry that has a
				 * matching nini_parent so there is no need to
				 * worry about continuing a loop where we are
				 * free'ing the structure holding the 'next'
				 * pointer.
				 */
				LIST_REMOVE(tmp, nini_next);
				net_instance_int_free(tmp);
				break;
			}
		}
		mutex_exit(&nts->nts_lock);
	}

	mutex_exit(&neti_stack_lock);

	return (DDI_SUCCESS);
}

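/*
 * The corresponding detach path for the sketch above (again illustrative
 * only): unregister the instance first and, only if that succeeds, free
 * the net_instance_t that was allocated at attach time.
 *
 *	static int
 *	example_detach(void)
 *	{
 *		if (net_instance_unregister(example_instance) != DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *		net_instance_free(example_instance);
 *		example_instance = NULL;
 *		return (DDI_SUCCESS);
 *	}
 */
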
static void
neti_apply_all_instances(neti_stack_t *nts, napplyfn_t *applyfn)
{
	net_instance_int_t *n;

	ASSERT(mutex_owned(&neti_stack_lock));

	n = LIST_FIRST(&nts->nts_instances);
	while (n != NULL) {
		if ((applyfn)(nts, n->nini_parent)) {
			/* Lock dropped - restart at head */
			n = LIST_FIRST(&nts->nts_instances);
		} else {
			n = LIST_NEXT(n, nini_next);
		}
	}
}

static void
neti_apply_all_stacks(void *parent, napplyfn_t *applyfn)
{
	neti_stack_t *nts;

	ASSERT(mutex_owned(&neti_stack_lock));

	nts = LIST_FIRST(&neti_stack_list);
	while (nts != NULL) {
		/*
		 * This function differs from the zsd/netstack code in that
		 * it doesn't make a "wait_creator" call.  The waiting is
		 * pushed into the apply functions, which do it in
		 * wait_for_nini_inprogress via the cmask that is passed in.
		 */
		if ((applyfn)(nts, parent)) {
			/* Lock dropped - restart at head */
			nts = LIST_FIRST(&neti_stack_list);
		} else {
			nts = LIST_NEXT(nts, nts_next);
		}
	}
}

static boolean_t
neti_stack_apply_create(neti_stack_t *nts, void *parent)
{
	void *result;
	boolean_t dropped = B_FALSE;
	net_instance_int_t *tmp;
	net_instance_t *nin;

	ASSERT(parent != NULL);
	ASSERT(mutex_owned(&neti_stack_lock));

	mutex_enter(&nts->nts_lock);

	LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
		if (tmp->nini_parent == parent)
			break;
	}
	if (tmp == NULL) {
		mutex_exit(&nts->nts_lock);
		return (dropped);
	}

	tmp->nini_ref++;

	if (wait_for_nini_inprogress(nts, tmp, 0))
		dropped = B_TRUE;

	if ((tmp->nini_flags & NSS_CREATE_NEEDED) && !tmp->nini_condemned) {
		nin = tmp->nini_instance;
		tmp->nini_flags &= ~NSS_CREATE_NEEDED;
		tmp->nini_flags |= NSS_CREATE_INPROGRESS;
		DTRACE_PROBE2(neti__stack__create__inprogress,
		    neti_stack_t *, nts, net_instance_int_t *, tmp);
		mutex_exit(&nts->nts_lock);
		mutex_exit(&neti_stack_lock);
		dropped = B_TRUE;

		ASSERT(tmp->nini_created == NULL);
		ASSERT(nin->nin_create != NULL);
		DTRACE_PROBE2(neti__stack__create__start,
		    netstackid_t, nts->nts_id,
		    neti_stack_t *, nts);
		result = (nin->nin_create)(nts->nts_id);
		DTRACE_PROBE2(neti__stack__create__end,
		    void *, result, neti_stack_t *, nts);

		ASSERT(result != NULL);
		mutex_enter(&neti_stack_lock);
		mutex_enter(&nts->nts_lock);
		tmp->nini_created = result;
		tmp->nini_flags &= ~NSS_CREATE_INPROGRESS;
		tmp->nini_flags |= NSS_CREATE_COMPLETED;
		cv_broadcast(&tmp->nini_cv);
		DTRACE_PROBE2(neti__stack__create__completed,
		    neti_stack_t *, nts, net_instance_int_t *, tmp);
	}
	tmp->nini_ref--;

	if (tmp->nini_condemned) {
		net_instance_int_free(tmp);
		dropped = B_TRUE;
	}
	mutex_exit(&nts->nts_lock);
	return (dropped);
}

static boolean_t
neti_stack_apply_shutdown(neti_stack_t *nts, void *parent)
{
	boolean_t dropped = B_FALSE;
	net_instance_int_t *tmp;
	net_instance_t *nin;

	ASSERT(parent != NULL);
	ASSERT(mutex_owned(&neti_stack_lock));

	mutex_enter(&nts->nts_lock);

	LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
		if (tmp->nini_parent == parent)
			break;
	}
	if (tmp == NULL) {
		mutex_exit(&nts->nts_lock);
		return (dropped);
	}

	ASSERT((tmp->nini_flags & NSS_SHUTDOWN_ALL) != 0);

	tmp->nini_ref++;

	if (wait_for_nini_inprogress(nts, tmp, NSS_CREATE_NEEDED))
		dropped = B_TRUE;

	nin = tmp->nini_instance;
	if (nin->nin_shutdown == NULL) {
		/*
		 * If there is no shutdown function, fake having completed it.
		 */
		if (tmp->nini_flags & NSS_SHUTDOWN_NEEDED) {
			tmp->nini_flags &= ~NSS_SHUTDOWN_NEEDED;
			tmp->nini_flags |= NSS_SHUTDOWN_COMPLETED;
		}
		tmp->nini_ref--;

		if (tmp->nini_condemned) {
			net_instance_int_free(tmp);
			dropped = B_TRUE;
		}

		mutex_exit(&nts->nts_lock);
		return (dropped);
	}

	if ((tmp->nini_flags & NSS_SHUTDOWN_NEEDED) && !tmp->nini_condemned) {
		ASSERT((tmp->nini_flags & NSS_CREATE_COMPLETED) != 0);
		tmp->nini_flags &= ~NSS_SHUTDOWN_NEEDED;
		tmp->nini_flags |= NSS_SHUTDOWN_INPROGRESS;
		DTRACE_PROBE2(neti__stack__shutdown__inprogress,
		    neti_stack_t *, nts, net_instance_int_t *, tmp);
		mutex_exit(&nts->nts_lock);
		mutex_exit(&neti_stack_lock);
		dropped = B_TRUE;

		ASSERT(nin->nin_shutdown != NULL);
		DTRACE_PROBE2(neti__stack__shutdown__start,
		    netstackid_t, nts->nts_id,
		    neti_stack_t *, nts);
		(nin->nin_shutdown)(nts->nts_id, tmp->nini_created);
		DTRACE_PROBE1(neti__stack__shutdown__end,
		    neti_stack_t *, nts);

		mutex_enter(&neti_stack_lock);
		mutex_enter(&nts->nts_lock);
		tmp->nini_flags &= ~NSS_SHUTDOWN_INPROGRESS;
		tmp->nini_flags |= NSS_SHUTDOWN_COMPLETED;
		cv_broadcast(&tmp->nini_cv);
		DTRACE_PROBE2(neti__stack__shutdown__completed,
		    neti_stack_t *, nts, net_instance_int_t *, tmp);
	}
	ASSERT((tmp->nini_flags & NSS_SHUTDOWN_COMPLETED) != 0);

	tmp->nini_ref--;

	if (tmp->nini_condemned) {
		net_instance_int_free(tmp);
		dropped = B_TRUE;
	}
	mutex_exit(&nts->nts_lock);
	return (dropped);
}

static boolean_t
neti_stack_apply_destroy(neti_stack_t *nts, void *parent)
{
	boolean_t dropped = B_FALSE;
	net_instance_int_t *tmp;
	net_instance_t *nin;

	ASSERT(parent != NULL);
	ASSERT(mutex_owned(&neti_stack_lock));

	mutex_enter(&nts->nts_lock);

	LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
		if (tmp->nini_parent == parent)
			break;
	}
	if (tmp == NULL) {
		mutex_exit(&nts->nts_lock);
		return (dropped);
	}

	tmp->nini_ref++;

	/*
	 * We pause here so that when we continue we know that we're the
	 * only one doing anything active with this node.
	 */
	if (wait_for_nini_inprogress(nts, tmp,
	    NSS_CREATE_NEEDED|NSS_SHUTDOWN_NEEDED))
		dropped = B_TRUE;

	if ((tmp->nini_flags & NSS_DESTROY_NEEDED) && !tmp->nini_condemned) {
		ASSERT((tmp->nini_flags & NSS_SHUTDOWN_COMPLETED) != 0);
		nin = tmp->nini_instance;
		tmp->nini_flags &= ~NSS_DESTROY_NEEDED;
		tmp->nini_flags |= NSS_DESTROY_INPROGRESS;
		DTRACE_PROBE2(neti__stack__destroy__inprogress,
		    neti_stack_t *, nts, net_instance_int_t *, tmp);
		mutex_exit(&nts->nts_lock);
		mutex_exit(&neti_stack_lock);
		dropped = B_TRUE;

		ASSERT(nin->nin_destroy != NULL);
		DTRACE_PROBE2(neti__stack__destroy__start,
		    netstackid_t, nts->nts_id,
		    neti_stack_t *, nts);
		(nin->nin_destroy)(nts->nts_id, tmp->nini_created);
		DTRACE_PROBE1(neti__stack__destroy__end,
		    neti_stack_t *, nts);

		mutex_enter(&neti_stack_lock);
		mutex_enter(&nts->nts_lock);
		tmp->nini_flags &= ~NSS_DESTROY_INPROGRESS;
		tmp->nini_flags |= NSS_DESTROY_COMPLETED;
		cv_broadcast(&tmp->nini_cv);
		DTRACE_PROBE2(neti__stack__destroy__completed,
		    neti_stack_t *, nts, net_instance_int_t *, tmp);
	}
	tmp->nini_ref--;

	if (tmp->nini_condemned) {
		net_instance_int_free(tmp);
		dropped = B_TRUE;
	}
	mutex_exit(&nts->nts_lock);
	return (dropped);
}

static boolean_t
wait_for_nini_inprogress(neti_stack_t *nts, net_instance_int_t *nini,
    uint32_t cmask)
{
	boolean_t dropped = B_FALSE;

	ASSERT(mutex_owned(&neti_stack_lock));

	while (nini->nini_flags & (NSS_ALL_INPROGRESS|cmask)) {
		DTRACE_PROBE2(neti__wait__nini__inprogress,
		    neti_stack_t *, nts, net_instance_int_t *, nini);
		dropped = B_TRUE;
		mutex_exit(&neti_stack_lock);

		cv_wait(&nini->nini_cv, &nts->nts_lock);

		/* First drop netstack_lock to preserve order */
		mutex_exit(&nts->nts_lock);
		DTRACE_PROBE2(wait__nini__inprogress__pause,
		    neti_stack_t *, nts, net_instance_int_t *, nini);
		mutex_enter(&neti_stack_lock);
		mutex_enter(&nts->nts_lock);
	}
	DTRACE_PROBE2(neti__wait__nini__inprogress__complete,
	    neti_stack_t *, nts, net_instance_int_t *, nini);

	return (dropped);
}

/* ======================================================================= */

netid_t
net_zoneidtonetid(zoneid_t zoneid)
{
	neti_stack_t *nts;

	mutex_enter(&neti_stack_lock);
	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
		if (nts->nts_zoneid == zoneid) {
			mutex_exit(&neti_stack_lock);
			return (nts->nts_id);
		}
	}
	mutex_exit(&neti_stack_lock);

	return (-1);
}

zoneid_t
net_getzoneidbynetid(netid_t netid)
{
	neti_stack_t *nts;

	mutex_enter(&neti_stack_lock);
	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
		if (nts->nts_id == netid) {
			mutex_exit(&neti_stack_lock);
			return (nts->nts_zoneid);
		}
	}
	mutex_exit(&neti_stack_lock);

	return (-1);
}

netstackid_t
net_getnetstackidbynetid(netid_t netid)
{
	neti_stack_t *nts;

	mutex_enter(&neti_stack_lock);
	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
		if (nts->nts_id == netid) {
			mutex_exit(&neti_stack_lock);
			return (nts->nts_stackid);
		}
	}
	mutex_exit(&neti_stack_lock);

	return (-1);
}

netid_t
net_getnetidbynetstackid(netstackid_t netstackid)
{
	neti_stack_t *nts;

	mutex_enter(&neti_stack_lock);
	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
		if (nts->nts_stackid == netstackid) {
			mutex_exit(&neti_stack_lock);
			return (nts->nts_id);
		}
	}
	mutex_exit(&neti_stack_lock);

	return (-1);
}

neti_stack_t *
net_getnetistackbyid(netid_t netid)
{
	neti_stack_t *nts;

	mutex_enter(&neti_stack_lock);
	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
		if (nts->nts_id == netid) {
			mutex_exit(&neti_stack_lock);
			return (nts);
		}
	}
	mutex_exit(&neti_stack_lock);

	return (NULL);
}

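/*
 * Illustrative sketch (not part of this file) of the identifier
 * conversions above: starting from a zoneid, a caller can derive the
 * netid and, from that, the netstackid; "zid" is an assumed input.
 *
 *	netid_t		nid;
 *	netstackid_t	sid;
 *
 *	nid = net_zoneidtonetid(zid);
 *	if (nid != -1) {
 *		sid = net_getnetstackidbynetid(nid);
 *		ASSERT(net_getnetidbynetstackid(sid) == nid);
 *	}
 */
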
int
net_instance_notify_register(netid_t netid, hook_notify_fn_t callback,
    void *arg)
{
	return (hook_stack_notify_register(net_getnetstackidbynetid(netid),
	    callback, arg));
}

int
net_instance_notify_unregister(netid_t netid, hook_notify_fn_t callback)
{
	return (hook_stack_notify_unregister(net_getnetstackidbynetid(netid),