/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
 */
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>
#include <sys/open.h>
#include <sys/sunldi.h>
#include <sys/sad.h>
#include <sys/netstack.h>
#define	O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */

/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define	STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;
#define	STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qhelps",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;
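
/*
 * Editorial note (illustrative, not part of the original source): these
 * counters are exported through the "streams:0:strstat" kstat installed in
 * strinit() below, so they can be inspected from a shell with:
 *
 *	kstat -n strstat
 */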
/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is not used anymore, but kept here in case some module still wants to access
 * it via qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the streams scheduling is done via task queues. Task queues may fail
 * for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service failed
 * freebs dispatch requests. Queues are put in the list specified by `qhead'
 * and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and freebs
 * requests are put into `freebs_list' which has no tail pointer. All three
 * lists are protected by a single `service_queue' lock and use the
 * `services_to_run' condition variable for signaling background threads. Use
 * of a single lock should not be a problem because it is only used under
 * heavy loads when task queues start to fail and at that time it may be a
 * good idea to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only
 * one thread is used for both.
 */
static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;

/*
 * Bufcalls related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait 'till executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */

kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */

static void *str_stack_init(netstackid_t stackid, netstack_t *ns);
static void str_stack_shutdown(netstackid_t stackid, void *arg);
static void str_stack_fini(netstackid_t stackid, void *arg);

/*
 * run_queues is no longer used, but is kept in case some 3rd party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
 * depend on this syncq flow control, so we prefer a large default value.
 * For potential performance gain, this value is tunable in /etc/system.
 */
int sq_max_size = 10000;
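
/*
 * Illustrative example (not part of the original source): since sq_max_size
 * is a plain kernel variable, it can be tuned by adding a line such as the
 * following to /etc/system and rebooting:
 *
 *	set sq_max_size = 25000
 */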
/*
 * The number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *                dm_sq == syncq_1
 *                dm_ref
 *                dm_next --> dm_str == streamtab_2
 *                            dm_sq == syncq_2
 *                            dm_ref
 *                            dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *                    ^                ^^                 ^
 *                    |  ______________/|                 |
 *                    | /               |                 |
 * dev_impl:     ...|x|y|...         module A          module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;
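
/*
 * Illustrative sketch (not part of the original source) of the reference
 * discipline described above, mirroring what qattach() and mlink_file()
 * below do on the attach path, with the matching release on detach:
 *
 *	if (NEED_DM(dp->d_dmp, qflag))
 *		dp->d_dmp = hold_dm(str, qflag, sqtype);  (takes a dm_ref)
 *	...
 *	rele_dm(dmp);  (drops dm_ref; frees perdm and syncq at zero)
 */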
extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/* Global esballoc throttling queue */
static esb_queue_t system_esbq;

/* Array of esballoc throttling queues, of length esbq_nelem */
static esb_queue_t *volatile system_esbq_array;
static int esbq_nelem;
static kmutex_t esbq_lock;
static int esbq_log2_cpus_per_q = 0;

/* Scale the system_esbq length by setting number of CPUs per queue. */
uint_t esbq_cpus_per_q = 1;

/*
 * esballoc tunable parameters.
 */
int	esbq_max_qlen = 0x16;	/* throttled queue length */
clock_t	esbq_timeout = 0x8;	/* timeout to process esb queue */

/*
 * Routines to handle esballoc queueing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */
static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);
struct module_info passthru_info = {
	0,
	"passthru",
	0,
	INFPSZ,
	STRHIGH,
	STRLOW
};

struct qinit passthru_rinit = {
	(int (*)())putnext,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

struct qinit passthru_winit = {
	(int (*)()) pass_wput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};
/*
 * Verify correctness of list head/tail pointers.
 */
#define	LISTCHECK(head, tail, link) {					\
	EQUIV(head, tail);						\
	IMPLY(tail != NULL, tail->link == NULL);			\
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail' using a `link' field.
 */
#define	ENQUEUE(el, head, tail, link) {					\
	ASSERT(el->link == NULL);					\
	LISTCHECK(head, tail, link);					\
	if (head == NULL)						\
		head = el;						\
	else								\
		tail->link = el;					\
	tail = el;							\
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail' pointers
 * using a `link' field and put the result into `el'.
 */
#define	DQ(el, head, tail, link) {					\
	LISTCHECK(head, tail, link);					\
	el = head;							\
	if (head != NULL) {						\
		head = head->link;					\
		if (head == NULL)					\
			tail = NULL;					\
		el->link = NULL;					\
	}								\
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return the
 * result in `succeed'.
 */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {		\
	LISTCHECK(head, tail, link);					\
	chase = NULL;							\
	succeed = 0;							\
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr;						\
	if (curr != NULL) {						\
		succeed = 1;						\
		ASSERT(curr == el);					\
		if (chase != NULL)					\
			chase->link = curr->link;			\
		else							\
			head = curr->link;				\
		curr->link = NULL;					\
		if (curr == tail)					\
			tail = chase;					\
	}								\
	LISTCHECK(head, tail, link);					\
}
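
/*
 * Illustrative sketch (not part of the original source) of how a backup
 * service thread consumes the `qhead'/`qtail' list with these macros,
 * along the lines of streams_qbkgrnd_service():
 *
 *	queue_t *q;
 *
 *	mutex_enter(&service_queue);
 *	DQ(q, qhead, qtail, q_link);
 *	mutex_exit(&service_queue);
 *	if (q != NULL)
 *		queue_service(q);
 */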
/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define	SQPUT_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if (!(qp->q_sqflags & Q_SQQUEUED)) {				\
		/* The queue should not be linked anywhere */		\
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */	\
		EQUIV(sq->sq_head, sq->sq_tail);			\
		/* Queue may be only enqueued on its syncq */		\
		ASSERT(sq == qp->q_syncq);				\
		/* Check the correctness of SQ_MESSAGES flag */		\
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES));	\
		/* Sanity check first/last elements of the list */	\
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL);\
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL);\
		/*							\
		 * Sanity check of priority field: empty queue should	\
		 * have zero priority					\
		 * and nqueues equal to zero.				\
		 */							\
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);		\
		/* Sanity check of sq_nqueues field */			\
		EQUIV(sq->sq_head, sq->sq_nqueues);			\
		if (sq->sq_head == NULL) {				\
			sq->sq_head = sq->sq_tail = qp;			\
			sq->sq_flags |= SQ_MESSAGES;			\
		} else if (qp->q_spri == 0) {				\
			qp->q_sqprev = sq->sq_tail;			\
			sq->sq_tail->q_sqnext = qp;			\
			sq->sq_tail = qp;				\
		} else {						\
			/*						\
			 * Put this queue in priority order: higher	\
			 * priority gets closer to the head.		\
			 */						\
			queue_t **qpp = &sq->sq_tail;			\
			queue_t *qnext = NULL;				\
									\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp;				\
				qpp = &(*qpp)->q_sqprev;		\
			}						\
			qp->q_sqnext = qnext;				\
			qp->q_sqprev = *qpp;				\
			if (*qpp != NULL) {				\
				(*qpp)->q_sqnext = qp;			\
			} else {					\
				sq->sq_head = qp;			\
				sq->sq_pri = sq->sq_head->q_spri;	\
			}						\
			*qpp = qp;					\
		}							\
		qp->q_sqflags |= Q_SQQUEUED;				\
		qp->q_sqtstamp = ddi_get_lbolt();			\
		sq->sq_nqueues++;					\
	}								\
}
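
/*
 * Worked example (illustrative, not part of the original source): with
 * queues A (q_spri 0) and B (q_spri 0) already on the list in FIFO order,
 * SQPUT_Q of C (q_spri 5) walks from sq_tail toward sq_head past every
 * entry whose q_spri is less than 5, so the list becomes C -> A -> B and
 * sq_pri is updated to 5.
 */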
/*
 * Remove a queue from the syncq list
 * Assumes SQLOCK held.
 */
#define	SQRM_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	ASSERT(qp->q_sqflags & Q_SQQUEUED);				\
	ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);		\
	ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);			\
	/* Check that the queue is actually in the list */		\
	ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);		\
	ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);		\
	ASSERT(sq->sq_nqueues != 0);					\
	if (qp->q_sqprev == NULL) {					\
		/* First queue on list, make head q_sqnext */		\
		sq->sq_head = qp->q_sqnext;				\
	} else {							\
		/* Make prev->next == next */				\
		qp->q_sqprev->q_sqnext = qp->q_sqnext;			\
	}								\
	if (qp->q_sqnext == NULL) {					\
		/* Last queue on list, make tail sqprev */		\
		sq->sq_tail = qp->q_sqprev;				\
	} else {							\
		/* Make next->prev == prev */				\
		qp->q_sqnext->q_sqprev = qp->q_sqprev;			\
	}								\
	/* clear out references on this queue */			\
	qp->q_sqprev = qp->q_sqnext = NULL;				\
	qp->q_sqflags &= ~Q_SQQUEUED;					\
	/* If there is nothing queued, clear SQ_MESSAGES */		\
	if (sq->sq_head != NULL) {					\
		sq->sq_pri = sq->sq_head->q_spri;			\
	} else {							\
		sq->sq_flags &= ~SQ_MESSAGES;				\
		sq->sq_pri = 0;						\
	}								\
	sq->sq_nqueues--;						\
	ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||		\
	    (sq->sq_flags & SQ_QUEUED) == 0);				\
}

/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define	SQPUT_MP(qp, mp)						\
{									\
	ASSERT(MUTEX_HELD(QLOCK(qp)));					\
	ASSERT(qp->q_sqhead == NULL ||					\
	    (qp->q_sqtail != NULL &&					\
	    qp->q_sqtail->b_next == NULL));				\
	qp->q_syncqmsgs++;						\
	ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */		\
	if (qp->q_sqhead == NULL) {					\
		qp->q_sqhead = qp->q_sqtail = mp;			\
	} else {							\
		qp->q_sqtail->b_next = mp;				\
		qp->q_sqtail = mp;					\
	}								\
	ASSERT(qp->q_syncqmsgs > 0);					\
	set_qfull(qp);							\
}
#define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count |= SQ_FASTPUT;		\
		}							\
	}								\
}

#define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count &= ~SQ_FASTPUT;		\
		}							\
	}								\
}

/*
 * Run service procedures for all queues in the stream head.
 */
#define	STR_SERVICE(stp, q) {						\
	ASSERT(MUTEX_HELD(&stp->sd_qlock));				\
	while (stp->sd_qhead != NULL) {					\
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);		\
		ASSERT(stp->sd_nqueues > 0);				\
		stp->sd_nqueues--;					\
		ASSERT(!(q->q_flag & QINSERVICE));			\
		mutex_exit(&stp->sd_qlock);				\
		queue_service(q);					\
		mutex_enter(&stp->sd_qlock);				\
	}								\
	ASSERT(stp->sd_nqueues == 0);					\
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
}
/*
 * Constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
	stp->sd_wrq = NULL;

	return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}

/*
 * Constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	qp->q_first = NULL;
	qp->q_link = NULL;
	qp->q_count = 0;
	qp->q_mblkcnt = 0;
	qp->q_sqhead = NULL;
	qp->q_sqtail = NULL;
	qp->q_sqnext = NULL;
	qp->q_sqprev = NULL;
	qp->q_sqflags = 0;
	qp->q_rwcnt = 0;
	qp->q_spri = 0;

	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_first = NULL;
	wqp->q_link = NULL;
	wqp->q_count = 0;
	wqp->q_mblkcnt = 0;
	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	wqp->q_sqflags = 0;
	wqp->q_rwcnt = 0;
	wqp->q_spri = 0;

	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_head = NULL;
	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	sq->sq_outer = NULL;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_next = NULL;
	sq->sq_svcflags = 0;
	sq->sq_servcount = 0;
	sq->sq_needexcl = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/*
 * Constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t	*sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t	*sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
	}

	return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}
/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();

	/*
	 * Handle to have autopush and persistent link information per
	 * zone.
	 * Note: uses shutdown hook instead of destroy hook so that the
	 * persistent links can be torn down before the destroy hooks
	 * in the TCP/IP stack are called.
	 */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
}
void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	struct stdata *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}
/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}

/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t  *proc;

	info.si_signo = SIGPOLL;
	info.si_errno = 0;
	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		int sevent;

		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;

		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}

		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when a fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}
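
/*
 * Illustrative userland sketch (not part of the original source): a process
 * registers for the events delivered through strsendsig()/dosendsig() above
 * with the I_SETSIG ioctl, e.g.
 *
 *	#include <stropts.h>
 *
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_HIPRI) < 0)
 *		perror("I_SETSIG");
 *
 * after which arrival of normal or high-priority input on the stream causes
 * a SIGPOLL to be delivered to the process.
 */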
/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t		major;
	cdevsw_impl_t	*dp;
	struct streamtab *str;
	queue_t		*rq;
	queue_t		*wrq;
	uint32_t	qflag;
	uint32_t	sqtype;
	perdm_t		*dmp;
	int		error;
	int		sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we can always set _QINSERTING and set up q_next
	 * pointer for both inserting and pushing a module. Then there
	 * is no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error != 0)
		goto failed;
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	ASSERT(qprocsareon(rq));
	return (0);

failed:
	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}
/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int error;
	dev_t dummydev;
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * successful open should have done qprocson()
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}
/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * the QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/* release any fmodsw_impl_t structure held on behalf of the queue */
	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}
/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}
/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through 3 stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done)
 * Thus we can not reset QENAB unless we actually removed it from the service
 * queue.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}

/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting the INSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist but for it to be QENABLED
 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
 * as well as INSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait till the syncqs associated with the queue disappear from the
	 * background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be freed
	 * when the last module/driver is unloaded.
	 * If for PERMOD perimeters queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of these
	 * functions remove the queue from its syncq list, so sqthread will not
	 * try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing of
		 * syncq to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}
/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t count;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}

	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
	    curproc->p_pid)) == NULL) {
		return (error);
	}
	error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
	if (error != 0) {
		freeb(tmp);
		return (error);
	}
	DB_CPID(tmp) = curproc->p_pid;
	tmp->b_wptr += count;
	bp->b_cont = tmp;

	return (0);
}

/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t  n;
	int	error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}
/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}
/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
static int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;
	/*
	 * if the lower stream is a pipe/FIFO, return, since link
	 * cycles can not happen on pipes/FIFOs
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < ss->ss_devcnt; i++) {
		np = &ss->ss_mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				else {
					np = np->mn_originp;
					continue;
				}
			}
		}
		/*
		 * If ep->me_nodep is a FIFO (me_nodep == NULL),
		 * ignore the edge and move on. ep->me_nodep gets
		 * set to NULL in mux_addedge() if it is a FIFO.
		 */
		ep = np->mn_startp;
		np->mn_startp = ep->me_nextp;
		if (ep->me_nodep == NULL)
			continue;
		ep->me_nodep->mn_originp = np;
		np = ep->me_nodep;
	}
}
/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}
/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}

/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
		syncq_t *sq = q->q_syncq;

		mutex_enter(SQLOCK(sq));
		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
}
int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
	struct stdata *stp;
	struct strioctl strioc;
	struct linkinfo *linkp;
	struct stdata *stpdown;
	struct streamtab *str;
	queue_t *passq;
	syncq_t *passyncq;
	queue_t *rq;
	cdevsw_impl_t *dp;
	uint32_t qflag;
	uint32_t sqtype;
	perdm_t *dmp;
	int error = 0;
	netstack_t *ns;
	str_stack_t *ss;

	stp = vp->v_stream;
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
	/*
	 * Test for invalid upper stream
	 */
	if (stp->sd_flag & STRHUP) {
		return (ENXIO);
	}
	if (vp->v_type == VFIFO) {
		return (EINVAL);
	}
	if (stp->sd_strtab == NULL) {
		return (EINVAL);
	}
	if (!stp->sd_strtab->st_muxwinit) {
		return (EINVAL);
	}
	if (fpdown == NULL) {
		return (EBADF);
	}
	ns = netstack_find_by_cred(crp);
	ASSERT(ns != NULL);
	ss = ns->netstack_str;
	ASSERT(ss != NULL);

	if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	mutex_enter(&muxifier);
	if (stp->sd_flag & STPLEX) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (ENXIO);
	}

	/*
	 * Test for invalid lower stream.
	 * The check for the v_type != VFIFO and having a major
	 * number not >= devcnt is done to avoid problems with
	 * adding mux_node entry past the end of mux_nodes[].
	 * For FIFO's we don't add an entry so this isn't a
	 * problem.
	 */
	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
	    (stpdown == stp) || (stpdown->sd_flag &
	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
	    ((stpdown->sd_vnode->v_type != VFIFO) &&
	    (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
	    linkcycle(stp, stpdown, ss)) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STPDOWN, "stpdown:%p", stpdown);
	rq = getendq(stp->sd_wrq);
	if (cmd == I_PLINK)
		rq = NULL;

	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

	strioc.ic_cmd = cmd;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	/*
	 * STRPLUMB protects plumbing changes and should be set before
	 * link_addpassthru()/link_rempassthru() are called, so it is set here
	 * and cleared at the end of mlink when the passthru queue is removed.
	 * Setting of STRPLUMB prevents reopens of the stream while the
	 * passthru queue is in place (it is not a proper module and doesn't
	 * have an open entry point).
	 *
	 * STPLEX prevents any threads from entering the stream from above. It
	 * can't be set before the call to link_addpassthru() because putnext
	 * from below may cause stream head I/O routines to be called and these
	 * routines assert that STPLEX is not set. After link_addpassthru()
	 * nothing may come from below since the pass queue syncq is blocked.
	 * Note also that STPLEX should be cleared before the call to
	 * link_rempassthru() since when messages start flowing to the stream
	 * head (e.g. because of message propagation from the pass queue)
	 * stream head I/O routines may be called with the STPLEX flag set.
	 *
	 * When STPLEX is set, nothing may come into the stream from above and
	 * it is safe to do a setq which will change the stream head. So, the
	 * correct sequence of actions is:
	 *
	 * 1) Set STRPLUMB
	 * 2) Call link_addpassthru()
	 * 3) Set STPLEX
	 * 4) Call setq and update the stream state
	 * 5) Clear STPLEX
	 * 6) Call link_rempassthru()
	 * 7) Clear STRPLUMB
	 *
	 * The same sequence applies to munlink() code.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
	/*
	 * Add passthru queue below lower mux. This will block
	 * syncqs of lower muxs read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STPLEX;
	mutex_exit(&stpdown->sd_lock);

	rq = _RD(stpdown->sd_wrq);
	/*
	 * There may be messages in the streamhead's syncq due to messages
	 * that arrived before link_addpassthru() was done. To avoid
	 * background processing of the syncq happening simultaneously with
	 * setq processing, we disable the streamhead syncq and wait until
	 * the existing background thread finishes working on it.
	 */
	wait_sq_svc(rq->q_syncq);
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED))
		blocksq(passyncq, SQ_BLOCKED, 0);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
	rq->q_ptr = _WR(rq)->q_ptr = NULL;

	/* setq might sleep in allocator - avoid holding locks. */
	/* Note: we are holding muxifier here. */

	str = stp->sd_strtab;
	dp = &devimpl[getmajor(vp->v_rdev)];
	ASSERT(dp->d_str == str);

	qflag = dp->d_qflag;
	sqtype = dp->d_sqtype;

	/* create perdm_t if needed */
	if (NEED_DM(dp->d_dmp, qflag))
		dp->d_dmp = hold_dm(str, qflag, sqtype);

	dmp = dp->d_dmp;

	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
	    B_TRUE);

	/*
	 * XXX Remove any "odd" messages from the queue.
	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
	 */
	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
	if (error != 0) {
		lbfree(linkp);

		if (!(passyncq->sq_flags & SQ_BLOCKED))
			blocksq(passyncq, SQ_BLOCKED, 0);
		/*
		 * Restore the stream head queue and then remove
		 * the passq. Turn off STPLEX before we turn on
		 * the stream by removing the passq.
		 */
		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
		    B_TRUE);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STPLEX;
		mutex_exit(&stpdown->sd_lock);

		link_rempassthru(passq);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STRPLUMB;
		/* Wakeup anyone waiting for STRPLUMB to clear. */
		cv_broadcast(&stpdown->sd_monitor);
		mutex_exit(&stpdown->sd_lock);

		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (error);
	}
	mutex_enter(&fpdown->f_tlock);
	fpdown->f_count++;
	mutex_exit(&fpdown->f_tlock);

	/*
	 * if we've made it here the linkage is all set up so we should also
	 * set up the layered driver linkages
	 */

	ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
	if (cmd == I_LINK) {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
	} else {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
	}

	link_rempassthru(passq);

	mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);

	/*
	 * Mark the upper stream as having dependent links
	 * so that strclose can clean it up.
	 */
	if (cmd == I_LINK) {
		mutex_enter(&stp->sd_lock);
		stp->sd_flag |= STRHASLINKS;
		mutex_exit(&stp->sd_lock);
	}
	/*
	 * Wake up any other processes that may have been
	 * waiting on the lower stream. These will all
	 * error out.
	 */
	mutex_enter(&stpdown->sd_lock);
	/* The passthru module is removed so we may release STRPLUMB */
	stpdown->sd_flag &= ~STRPLUMB;
	cv_broadcast(&rq->q_wait);
	cv_broadcast(&_WR(rq)->q_wait);
	cv_broadcast(&stpdown->sd_monitor);
	mutex_exit(&stpdown->sd_lock);
	mutex_exit(&muxifier);
	*rvalp = linkp->li_lblk.l_index;
	netstack_rele(ss->ss_netstack);
	return (0);
}
int
mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
{
	int ret;
	struct file *fpdown;

	fpdown = getf(arg);
	ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
	if (fpdown != NULL)
		releasef(arg);
	return (ret);
}
1993 * Unlink a multiplexor link. Stp is the controlling stream for the
1994 * link, and linkp points to the link's entry in the linkinfo list.
1995 * The muxifier lock must be held on entry and is dropped on exit.
1997 * NOTE: Currently it is assumed that the mux will process all the messages
1998 * sitting on its queue before ACKing the UNLINK. It is the responsibility
1999 * of the mux to handle all the messages that arrive before the UNLINK.
2000 * If the mux has to send down messages on its lower stream before
2001 * ACKing I_UNLINK, then it *should* know how to handle messages even
2002 * after the UNLINK is acked (actually it should be able to handle them until
2003 * we re-block the read side of the pass queue here). If the mux does not
2004 * open up the lower stream, any messages that arrive during the UNLINK
2005 * will be put in the stream head. If the lower stream is opened
2006 * up, some messages might land in the stream head depending on when
2007 * the message arrived and when the read side of the pass queue was
2008 * re-blocked.
2011 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
2012 str_stack_t *ss)
2014 struct strioctl strioc;
2015 struct stdata *stpdown;
2016 queue_t *rq, *wrq;
2017 queue_t *passq;
2018 syncq_t *passyncq;
2019 int error = 0;
2020 file_t *fpdown;
2022 ASSERT(MUTEX_HELD(&muxifier));
2024 stpdown = linkp->li_fpdown->f_vnode->v_stream;
2027 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
2029 mutex_enter(&stpdown->sd_lock);
2030 stpdown->sd_flag |= STRPLUMB;
2031 mutex_exit(&stpdown->sd_lock);
2034 * Add passthru queue below lower mux. This will block
2035 * the syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
2037 passq = link_addpassthru(stpdown);
2039 if ((flag & LINKTYPEMASK) == LINKNORMAL)
2040 strioc.ic_cmd = I_UNLINK;
2041 else
2042 strioc.ic_cmd = I_PUNLINK;
2043 strioc.ic_timout = INFTIM;
2044 strioc.ic_len = sizeof (struct linkblk);
2045 strioc.ic_dp = (char *)&linkp->li_lblk;
2047 error = strdoioctl(stp, &strioc, FNATIVE,
2048 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
2051 * If there was an error and this is not called via strclose,
2052 * return to the user. Otherwise, pretend there was no error
2053 * and close the link.
2055 if (error) {
2056 if (flag & LINKCLOSE) {
2057 cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
2058 "unlink ioctl, closing anyway (%d)\n", error);
2059 } else {
2060 link_rempassthru(passq);
2061 mutex_enter(&stpdown->sd_lock);
2062 stpdown->sd_flag &= ~STRPLUMB;
2063 cv_broadcast(&stpdown->sd_monitor);
2064 mutex_exit(&stpdown->sd_lock);
2065 mutex_exit(&muxifier);
2066 return (error);
2070 mux_rmvedge(stp, linkp->li_lblk.l_index, ss);
2071 fpdown = linkp->li_fpdown;
2072 lbfree(linkp);
2075 * We go ahead and drop muxifier here--it's a nasty global lock that
2076 * can slow others down. It's okay to do so since attempts to mlink() this
2077 * stream will be stopped because STPLEX is still set in the stdata
2078 * structure, and munlink() is stopped because mux_rmvedge() and
2079 * lbfree() have removed it from mux_nodes[] and linkinfo_list,
2080 * respectively. Note that we defer the closef() of fpdown until
2081 * after we drop muxifier since strclose() can call munlinkall().
2083 mutex_exit(&muxifier);
2085 wrq = stpdown->sd_wrq;
2086 rq = _RD(wrq);
2089 * Get rid of outstanding service procedure runs, before we make
2090 * it a stream head, since a stream head doesn't have any service
2091 * procedure.
2093 disable_svc(rq);
2094 wait_svc(rq);
2097 * Since we don't disable the syncq for QPERMOD, we wait for whatever
2098 * is queued up to be finished. The mux should take care that nothing is
2099 * sent down to this queue. We should do it now, as we're going to block
2100 * passyncq if it was unblocked.
2102 if (wrq->q_flag & QPERMOD) {
2103 syncq_t *sq = wrq->q_syncq;
2105 mutex_enter(SQLOCK(sq));
2106 while (wrq->q_sqflags & Q_SQQUEUED) {
2107 sq->sq_flags |= SQ_WANTWAKEUP;
2108 cv_wait(&sq->sq_wait, SQLOCK(sq));
2110 mutex_exit(SQLOCK(sq));
2112 passyncq = passq->q_syncq;
2113 if (!(passyncq->sq_flags & SQ_BLOCKED)) {
2115 syncq_t *sq, *outer;
2118 * Messages could be flowing from underneath. We will
2119 * block the read side of the passq. This would be
2120 * sufficient for QPAIR and QPERQ muxes to ensure
2121 * that no data is flowing up into this queue
2122 * and hence no thread is active in this instance of
2123 * the lower mux. But for QPERMOD and QMTOUTPERIM there
2124 * could be messages on the inner and outer/inner
2125 * syncqs respectively. We will wait for them to drain.
2126 * Because the passq is blocked, messages end up in the syncq,
2127 * and qfill_syncq() could possibly end up setting QFULL,
2128 * which will access rq->q_flag. Hence, we have to
2129 * acquire the QLOCK in setq().
2131 * XXX Messages can also flow from the top into this
2132 * queue even though the unlink is over (e.g. some instance
2133 * in putnext() called from the top that has still not
2134 * accessed this queue, and also putq(lowerq)?).
2135 * Solution: how about blocking the l_qtop queue?
2136 * Do we really care about such pure D_MP muxes?
2139 blocksq(passyncq, SQ_BLOCKED, 0);
2141 sq = rq->q_syncq;
2142 if ((outer = sq->sq_outer) != NULL) {
2145 * We just have to wait for the outer sq_count to
2146 * drop to zero. As this does not prevent new
2147 * messages from entering the outer perimeter, this
2148 * is subject to starvation.
2150 * NOTE: Because of the blocksq above, messages could
2151 * be in the inner syncq only because of some
2152 * thread holding the outer perimeter exclusively.
2153 * Hence it would be sufficient to wait for the
2154 * exclusive holder of the outer perimeter to drain
2155 * the inner and outer syncqs. But we will not depend
2156 * on this feature and hence check the inner syncqs
2157 * separately.
2159 wait_syncq(outer);
2164 * There could be messages destined for
2165 * this queue. Let the exclusive holder
2166 * drain it.
2169 wait_syncq(sq);
2170 ASSERT((rq->q_flag & QPERMOD) ||
2171 ((rq->q_syncq->sq_head == NULL) &&
2172 (_WR(rq)->q_syncq->sq_head == NULL)));
2176 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special
2177 * case as we don't disable its syncq or remove it from the syncq
2178 * service list.
2180 if (rq->q_flag & QPERMOD) {
2181 syncq_t *sq = rq->q_syncq;
2183 mutex_enter(SQLOCK(sq));
2184 while (rq->q_sqflags & Q_SQQUEUED) {
2185 sq->sq_flags |= SQ_WANTWAKEUP;
2186 cv_wait(&sq->sq_wait, SQLOCK(sq));
2188 mutex_exit(SQLOCK(sq));
2192 * flush_syncq changes states only when there are some messages to
2193 * free, i.e. when it returns a non-zero value.
2195 ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
2196 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);
2199 * Nobody else should know about this queue now.
2200 * If the mux did not process the messages before
2201 * acking the I_UNLINK, free them now.
2204 flushq(rq, FLUSHALL);
2205 flushq(_WR(rq), FLUSHALL);
2208 * Convert the mux lower queue into a stream head queue.
2209 * Turn off STPLEX before we turn on the stream by removing the passq.
2211 rq->q_ptr = wrq->q_ptr = stpdown;
2212 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
2214 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
2215 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
2217 enable_svc(rq);
2220 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
2221 * needs to be set to prevent a reopen() of the stream - such a reopen may
2222 * try to call the non-existent pass queue open routine and panic.
2224 mutex_enter(&stpdown->sd_lock);
2225 stpdown->sd_flag &= ~STPLEX;
2226 mutex_exit(&stpdown->sd_lock);
2228 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
2229 ((flag & LINKTYPEMASK) == LINKPERSIST));
2231 /* clean up the layered driver linkages */
2232 if ((flag & LINKTYPEMASK) == LINKNORMAL) {
2233 ldi_munlink_fp(stp, fpdown, LINKNORMAL);
2234 } else {
2235 ldi_munlink_fp(stp, fpdown, LINKPERSIST);
2238 link_rempassthru(passq);
2241 * Now all plumbing changes are finished and STRPLUMB is no
2242 * longer needed.
2244 mutex_enter(&stpdown->sd_lock);
2245 stpdown->sd_flag &= ~STRPLUMB;
2246 cv_broadcast(&stpdown->sd_monitor);
2247 mutex_exit(&stpdown->sd_lock);
2249 (void) closef(fpdown);
2250 return (0);
2254 * Unlink all multiplexor links for which stp is the controlling stream.
2255 * Return 0, or a non-zero errno on failure.
2258 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss)
2260 linkinfo_t *linkp;
2261 int error = 0;
2263 mutex_enter(&muxifier);
2264 while (linkp = findlinks(stp, 0, flag, ss)) {
2266 * munlink() releases the muxifier lock.
2268 if (error = munlink(stp, linkp, flag, crp, rvalp, ss))
2269 return (error);
2270 mutex_enter(&muxifier);
2272 mutex_exit(&muxifier);
2273 return (0);
2277 * A multiplexor link has been made. Add an
2278 * edge to the directed graph.
2280 void
2281 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss)
2283 struct mux_node *np;
2284 struct mux_edge *ep;
2285 major_t upmaj;
2286 major_t lomaj;
2288 upmaj = getmajor(upstp->sd_vnode->v_rdev);
2289 lomaj = getmajor(lostp->sd_vnode->v_rdev);
2290 np = &ss->ss_mux_nodes[upmaj];
2291 if (np->mn_outp) {
2292 ep = np->mn_outp;
2293 while (ep->me_nextp)
2294 ep = ep->me_nextp;
2295 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2296 ep = ep->me_nextp;
2297 } else {
2298 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2299 ep = np->mn_outp;
2301 ep->me_nextp = NULL;
2302 ep->me_muxid = muxid;
2304 * Save the dev_t for the purposes of str_stack_shutdown.
2305 * str_stack_shutdown assumes that the device allows reopen, since
2306 * this dev_t is the one after any cloning by xx_open().
2307 * Would prefer finding the dev_t from before any cloning,
2308 * but specfs doesn't retain that.
2310 ep->me_dev = upstp->sd_vnode->v_rdev;
2311 if (lostp->sd_vnode->v_type == VFIFO)
2312 ep->me_nodep = NULL;
2313 else
2314 ep->me_nodep = &ss->ss_mux_nodes[lomaj];
2318 * A multiplexor link has been removed. Remove the
2319 * edge in the directed graph.
2321 void
2322 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss)
2324 struct mux_node *np;
2325 struct mux_edge *ep;
2326 struct mux_edge *pep = NULL;
2327 major_t upmaj;
2329 upmaj = getmajor(upstp->sd_vnode->v_rdev);
2330 np = &ss->ss_mux_nodes[upmaj];
2331 ASSERT(np->mn_outp != NULL);
2332 ep = np->mn_outp;
2333 while (ep) {
2334 if (ep->me_muxid == muxid) {
2335 if (pep)
2336 pep->me_nextp = ep->me_nextp;
2337 else
2338 np->mn_outp = ep->me_nextp;
2339 kmem_free(ep, sizeof (struct mux_edge));
2340 return;
2342 pep = ep;
2343 ep = ep->me_nextp;
2345 ASSERT(0); /* should not reach here */
2349 * Translate the device flags (from conf.h) to the corresponding
2350 * qflag and sq_flag (type) values.
2353 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
2354 uint32_t *sqtypep)
2356 uint32_t qflag = 0;
2357 uint32_t sqtype = 0;
2359 if (devflag & _D_OLD)
2360 goto bad;
2362 /* Inner perimeter presence and scope */
2363 switch (devflag & D_MTINNER_MASK) {
2364 case D_MP:
2365 qflag |= QMTSAFE;
2366 sqtype |= SQ_CI;
2367 break;
2368 case D_MTPERQ|D_MP:
2369 qflag |= QPERQ;
2370 break;
2371 case D_MTQPAIR|D_MP:
2372 qflag |= QPAIR;
2373 break;
2374 case D_MTPERMOD|D_MP:
2375 qflag |= QPERMOD;
2376 break;
2377 default:
2378 goto bad;
2381 /* Outer perimeter */
2382 if (devflag & D_MTOUTPERIM) {
2383 switch (devflag & D_MTINNER_MASK) {
2384 case D_MP:
2385 case D_MTPERQ|D_MP:
2386 case D_MTQPAIR|D_MP:
2387 break;
2388 default:
2389 goto bad;
2391 qflag |= QMTOUTPERIM;
2394 /* Inner perimeter modifiers */
2395 if (devflag & D_MTINNER_MOD) {
2396 switch (devflag & D_MTINNER_MASK) {
2397 case D_MP:
2398 goto bad;
2399 default:
2400 break;
2402 if (devflag & D_MTPUTSHARED)
2403 sqtype |= SQ_CIPUT;
2404 if (devflag & _D_MTOCSHARED) {
2406 * The code in putnext assumes that it has the
2407 * highest concurrency by not checking sq_count.
2408 * Thus _D_MTOCSHARED can only be supported when
2409 * D_MTPUTSHARED is set.
2411 if (!(devflag & D_MTPUTSHARED))
2412 goto bad;
2413 sqtype |= SQ_CIOC;
2415 if (devflag & _D_MTCBSHARED) {
2417 * The code in putnext assumes that it has the
2418 * highest concurrency by not checking sq_count.
2419 * Thus _D_MTCBSHARED can only be supported when
2420 * D_MTPUTSHARED is set.
2422 if (!(devflag & D_MTPUTSHARED))
2423 goto bad;
2424 sqtype |= SQ_CICB;
2426 if (devflag & _D_MTSVCSHARED) {
2428 * The code in putnext assumes that it has the
2429 * highest concurrency by not checking sq_count.
2430 * Thus _D_MTSVCSHARED can only be supported when
2431 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is
2432 * supported only for QPERMOD.
2434 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD))
2435 goto bad;
2436 sqtype |= SQ_CISVC;
2440 /* Default outer perimeter concurrency */
2441 sqtype |= SQ_CO;
2443 /* Outer perimeter modifiers */
2444 if (devflag & D_MTOCEXCL) {
2445 if (!(devflag & D_MTOUTPERIM)) {
2446 /* No outer perimeter */
2447 goto bad;
2449 sqtype &= ~SQ_COOC;
2452 /* Synchronous Streams extended qinit structure */
2453 if (devflag & D_SYNCSTR)
2454 qflag |= QSYNCSTR;
2457 * Private flag used by a transport module to indicate
2458 * to sockfs that it supports direct-access mode without
2459 * having to go through STREAMS.
2461 if (devflag & _D_DIRECT) {
2462 /* Reject unless the module is fully-MT (no perimeter) */
2463 if ((qflag & QMT_TYPEMASK) != QMTSAFE)
2464 goto bad;
2465 qflag |= _QDIRECT;
2469 * Private flag used to indicate that a streams module should only
2470 * be pushed once. The TTY streams modules have this flag since, if
2471 * libc believes itself to be an xpg4 process, it will
2472 * automatically and unconditionally push them when a PTS device is
2473 * opened. If an application is not aware of this, then without this
2474 * flag we would end up with duplicate modules.
2476 if (devflag & _D_SINGLE_INSTANCE)
2477 qflag |= _QSINGLE_INSTANCE;
2479 *qflagp = qflag;
2480 *sqtypep = sqtype;
2481 return (0);
2483 bad:
2484 cmn_err(CE_WARN,
2485 "stropen: bad MT flags (0x%x) in driver '%s'",
2486 (int)(qflag & D_MTSAFETY_MASK),
2487 stp->st_rdinit->qi_minfo->mi_idname);
2489 return (EINVAL);
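
/*
 * Illustrative sketch only (an assumption for documentation, not used by
 * the framework): how a hypothetical driver's MT flags come through
 * devflg_to_qflag().  The devflag combination and the expected outcome
 * below are assumptions chosen for the example.
 */
#ifdef DEBUG
static void
devflg_to_qflag_example(struct streamtab *stab)
{
	uint32_t qflag, sqtype;

	/*
	 * D_MP|D_MTPERMOD selects an inner per-module perimeter;
	 * D_MTPUTSHARED additionally allows concurrent put procedures.
	 */
	if (devflg_to_qflag(stab, D_MP | D_MTPERMOD | D_MTPUTSHARED,
	    &qflag, &sqtype) == 0) {
		ASSERT((qflag & QMT_TYPEMASK) == QPERMOD);
		ASSERT(sqtype & SQ_CIPUT);
	}
}
#endif /* DEBUG */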
2493 * Set the interface values for a pair of queues (qinit structure,
2494 * packet sizes, water marks).
2495 * setq assumes that the caller does not have a claim (entersq or claimq)
2496 * on the queue.
2498 void
2499 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit,
2500 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed)
2502 queue_t *wq;
2503 syncq_t *sq, *outer;
2505 ASSERT(rq->q_flag & QREADR);
2506 ASSERT((qflag & QMT_TYPEMASK) != 0);
2507 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
2509 wq = _WR(rq);
2510 rq->q_qinfo = rinit;
2511 rq->q_hiwat = rinit->qi_minfo->mi_hiwat;
2512 rq->q_lowat = rinit->qi_minfo->mi_lowat;
2513 rq->q_minpsz = rinit->qi_minfo->mi_minpsz;
2514 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz;
2515 wq->q_qinfo = winit;
2516 wq->q_hiwat = winit->qi_minfo->mi_hiwat;
2517 wq->q_lowat = winit->qi_minfo->mi_lowat;
2518 wq->q_minpsz = winit->qi_minfo->mi_minpsz;
2519 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz;
2521 /* Remove old syncqs */
2522 sq = rq->q_syncq;
2523 outer = sq->sq_outer;
2524 if (outer != NULL) {
2525 ASSERT(wq->q_syncq->sq_outer == outer);
2526 outer_remove(outer, rq->q_syncq);
2527 if (wq->q_syncq != rq->q_syncq)
2528 outer_remove(outer, wq->q_syncq);
2530 ASSERT(sq->sq_outer == NULL);
2531 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2533 if (sq != SQ(rq)) {
2534 if (!(rq->q_flag & QPERMOD))
2535 free_syncq(sq);
2536 if (wq->q_syncq == rq->q_syncq)
2537 wq->q_syncq = NULL;
2538 rq->q_syncq = NULL;
2540 if (wq->q_syncq != NULL && wq->q_syncq != sq &&
2541 wq->q_syncq != SQ(rq)) {
2542 free_syncq(wq->q_syncq);
2543 wq->q_syncq = NULL;
2545 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL &&
2546 rq->q_syncq->sq_tail == NULL));
2547 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL &&
2548 wq->q_syncq->sq_tail == NULL));
2550 if (!(rq->q_flag & QPERMOD) &&
2551 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) {
2552 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2553 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl,
2554 rq->q_syncq->sq_nciputctrl, 0);
2555 ASSERT(ciputctrl_cache != NULL);
2556 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl);
2557 rq->q_syncq->sq_ciputctrl = NULL;
2558 rq->q_syncq->sq_nciputctrl = 0;
2561 if (!(wq->q_flag & QPERMOD) &&
2562 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) {
2563 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2564 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl,
2565 wq->q_syncq->sq_nciputctrl, 0);
2566 ASSERT(ciputctrl_cache != NULL);
2567 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl);
2568 wq->q_syncq->sq_ciputctrl = NULL;
2569 wq->q_syncq->sq_nciputctrl = 0;
2572 sq = SQ(rq);
2573 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
2574 ASSERT(sq->sq_outer == NULL);
2575 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2578 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS
2579 * bits in sq_flag based on the sqtype.
2581 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0);
2583 rq->q_syncq = wq->q_syncq = sq;
2584 sq->sq_type = sqtype;
2585 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS);
2588 * We are making sq_svcflags zero,
2589 * resetting SQ_DISABLED in case it was set by
2590 * wait_svc() in the munlink path.
2593 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0);
2594 sq->sq_svcflags = 0;
2597 * We need to acquire the lock here for the mlink and munlink case,
2598 * where canputnext, backenable, etc can access the q_flag.
2600 if (lock_needed) {
2601 mutex_enter(QLOCK(rq));
2602 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2603 mutex_exit(QLOCK(rq));
2604 mutex_enter(QLOCK(wq));
2605 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2606 mutex_exit(QLOCK(wq));
2607 } else {
2608 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2609 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2612 if (qflag & QPERQ) {
2613 /* Allocate a separate syncq for the write side */
2614 sq = new_syncq();
2615 sq->sq_type = rq->q_syncq->sq_type;
2616 sq->sq_flags = rq->q_syncq->sq_flags;
2617 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2618 sq->sq_oprev == NULL);
2619 wq->q_syncq = sq;
2621 if (qflag & QPERMOD) {
2622 sq = dmp->dm_sq;
2625 * Assert that we do have an inner perimeter syncq and that it
2626 * does not have an outer perimeter associated with it.
2628 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2629 sq->sq_oprev == NULL);
2630 rq->q_syncq = wq->q_syncq = sq;
2632 if (qflag & QMTOUTPERIM) {
2633 outer = dmp->dm_sq;
2635 ASSERT(outer->sq_outer == NULL);
2636 outer_insert(outer, rq->q_syncq);
2637 if (wq->q_syncq != rq->q_syncq)
2638 outer_insert(outer, wq->q_syncq);
2640 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2641 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2642 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2643 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2644 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK));
2647 * Initialize struio() types.
2649 rq->q_struiot =
2650 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE;
2651 wq->q_struiot =
2652 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE;
2655 perdm_t *
2656 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype)
2658 syncq_t *sq;
2659 perdm_t **pp;
2660 perdm_t *p;
2661 perdm_t *dmp;
2663 ASSERT(str != NULL);
2664 ASSERT(qflag & (QPERMOD | QMTOUTPERIM));
2666 rw_enter(&perdm_rwlock, RW_READER);
2667 for (p = perdm_list; p != NULL; p = p->dm_next) {
2668 if (p->dm_str == str) { /* found one */
2669 atomic_inc_32(&(p->dm_ref));
2670 rw_exit(&perdm_rwlock);
2671 return (p);
2674 rw_exit(&perdm_rwlock);
2676 sq = new_syncq();
2677 if (qflag & QPERMOD) {
2678 sq->sq_type = sqtype | SQ_PERMOD;
2679 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS;
2680 } else {
2681 ASSERT(qflag & QMTOUTPERIM);
2682 sq->sq_onext = sq->sq_oprev = sq;
2685 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP);
2686 dmp->dm_sq = sq;
2687 dmp->dm_str = str;
2688 dmp->dm_ref = 1;
2689 dmp->dm_next = NULL;
2691 rw_enter(&perdm_rwlock, RW_WRITER);
2692 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) {
2693 if (p->dm_str == str) { /* already present */
2694 p->dm_ref++;
2695 rw_exit(&perdm_rwlock);
2696 free_syncq(sq);
2697 kmem_free(dmp, sizeof (perdm_t));
2698 return (p);
2702 *pp = dmp;
2703 rw_exit(&perdm_rwlock);
2704 return (dmp);
2707 void
2708 rele_dm(perdm_t *dmp)
2710 perdm_t **pp;
2711 perdm_t *p;
2713 rw_enter(&perdm_rwlock, RW_WRITER);
2714 ASSERT(dmp->dm_ref > 0);
2716 if (--dmp->dm_ref > 0) {
2717 rw_exit(&perdm_rwlock);
2718 return;
2721 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next))
2722 if (p == dmp)
2723 break;
2724 ASSERT(p == dmp);
2725 *pp = p->dm_next;
2726 rw_exit(&perdm_rwlock);
2729 * Wait for any background processing that relies on the
2730 * syncq to complete before it is freed.
2732 wait_sq_svc(p->dm_sq);
2733 free_syncq(p->dm_sq);
2734 kmem_free(p, sizeof (perdm_t));
2738 * Make a protocol message given control and data buffers.
2739 * n.b., this can block; be careful of what locks you hold when calling it.
2741 * If sd_maxblk is less than *iosize this routine can fail part way through
2742 * (due to an allocation failure). In this case, on return, *iosize will
2743 * contain the amount that was consumed. Otherwise *iosize will not be
2744 * modified, i.e. the entire amount was consumed.
2747 strmakemsg(
2748 struct strbuf *mctl,
2749 ssize_t *iosize,
2750 struct uio *uiop,
2751 stdata_t *stp,
2752 int32_t flag,
2753 mblk_t **mpp)
2755 mblk_t *mpctl = NULL;
2756 mblk_t *mpdata = NULL;
2757 int error;
2759 ASSERT(uiop != NULL);
2761 *mpp = NULL;
2762 /* Create control part, if any */
2763 if ((mctl != NULL) && (mctl->len >= 0)) {
2764 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl);
2765 if (error)
2766 return (error);
2768 /* Create data part, if any */
2769 if (*iosize >= 0) {
2770 error = strmakedata(iosize, uiop, stp, flag, &mpdata);
2771 if (error) {
2772 freemsg(mpctl);
2773 return (error);
2776 if (mpctl != NULL) {
2777 if (mpdata != NULL)
2778 linkb(mpctl, mpdata);
2779 *mpp = mpctl;
2780 } else {
2781 *mpp = mpdata;
2783 return (0);
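
/*
 * Illustrative usage sketch only (nothing in the framework calls this):
 * build an M_PROTO control part plus data from a uio with strmakemsg()
 * and send it downstream.  The "hdr"/"hdrlen" parameters are assumed to
 * describe a user-space header buffer, since strmakectl() copies the
 * control part in with copyin().
 */
static int
strmakemsg_example(stdata_t *stp, struct uio *uiop, char *hdr, int hdrlen)
{
	struct strbuf ctl;
	ssize_t iosize = uiop->uio_resid;
	mblk_t *mp = NULL;
	int error;

	ctl.len = hdrlen;	/* >= 0, so a control part is built */
	ctl.buf = hdr;

	error = strmakemsg(&ctl, &iosize, uiop, stp, 0, &mp);
	if (error != 0)
		return (error);
	if (mp != NULL)
		putnext(stp->sd_wrq, mp);	/* control linked ahead of data */
	return (0);
}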
2787 * Make the control part of a protocol message given a control buffer.
2788 * n.b., this can block; be careful of what locks you hold when calling it.
2791 strmakectl(
2792 struct strbuf *mctl,
2793 int32_t flag,
2794 int32_t fflag,
2795 mblk_t **mpp)
2797 mblk_t *bp = NULL;
2798 unsigned char msgtype;
2799 int error = 0;
2800 cred_t *cr = CRED();
2802 /* We do not support interrupt threads using the stream head to send */
2803 ASSERT(cr != NULL);
2805 *mpp = NULL;
2807 * Create control part of message, if any.
2809 if ((mctl != NULL) && (mctl->len >= 0)) {
2810 caddr_t base;
2811 int ctlcount;
2812 int allocsz;
2814 if (flag & RS_HIPRI)
2815 msgtype = M_PCPROTO;
2816 else
2817 msgtype = M_PROTO;
2819 ctlcount = mctl->len;
2820 base = mctl->buf;
2823 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
2824 * blocks by increasing the size to something more usable.
2826 allocsz = MAX(ctlcount, 64);
2829 * Range checking has already been done; simply try
2830 * to allocate a message block for the ctl part.
2832 while ((bp = allocb_cred(allocsz, cr,
2833 curproc->p_pid)) == NULL) {
2834 if (fflag & (FNDELAY|FNONBLOCK))
2835 return (EAGAIN);
2836 if (error = strwaitbuf(allocsz, BPRI_MED))
2837 return (error);
2840 bp->b_datap->db_type = msgtype;
2841 if (copyin(base, bp->b_wptr, ctlcount)) {
2842 freeb(bp);
2843 return (EFAULT);
2845 bp->b_wptr += ctlcount;
2847 *mpp = bp;
2848 return (0);
2852 * Make a protocol message given data buffers.
2853 * n.b., this can block; be careful of what locks you hold when calling it.
2855 * If sd_maxblk is less than *iosize this routine can fail part way through
2856 * (due to an allocation failure). In this case, on return, *iosize will
2857 * contain the amount that was consumed. Otherwise *iosize will not be
2858 * modified, i.e. the entire amount was consumed.
2861 strmakedata(
2862 ssize_t *iosize,
2863 struct uio *uiop,
2864 stdata_t *stp,
2865 int32_t flag,
2866 mblk_t **mpp)
2868 mblk_t *mp = NULL;
2869 mblk_t *bp;
2870 int wroff = (int)stp->sd_wroff;
2871 int tail_len = (int)stp->sd_tail;
2872 int extra = wroff + tail_len;
2873 int error = 0;
2874 ssize_t maxblk;
2875 ssize_t count = *iosize;
2876 cred_t *cr;
2878 *mpp = NULL;
2879 if (count < 0)
2880 return (0);
2882 /* We do not support interrupt threads using the stream head to send */
2883 cr = CRED();
2884 ASSERT(cr != NULL);
2886 maxblk = stp->sd_maxblk;
2887 if (maxblk == INFPSZ)
2888 maxblk = count;
2891 * Create data part of message, if any.
2893 do {
2894 ssize_t size;
2895 dblk_t *dp;
2897 ASSERT(uiop);
2899 size = MIN(count, maxblk);
2901 while ((bp = allocb_cred(size + extra, cr,
2902 curproc->p_pid)) == NULL) {
2903 error = EAGAIN;
2904 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
2905 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) {
2906 if (count == *iosize) {
2907 freemsg(mp);
2908 return (error);
2909 } else {
2910 *iosize -= count;
2911 *mpp = mp;
2912 return (0);
2916 dp = bp->b_datap;
2917 dp->db_cpid = curproc->p_pid;
2918 ASSERT(wroff <= dp->db_lim - bp->b_wptr);
2919 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff;
2921 if (flag & STRUIO_POSTPONE) {
2923 * Set up the stream uio portion of the
2924 * dblk for subsequent use by struioget().
2926 dp->db_struioflag = STRUIO_SPEC;
2927 dp->db_cksumstart = 0;
2928 dp->db_cksumstuff = 0;
2929 dp->db_cksumend = size;
2930 *(long long *)dp->db_struioun.data = 0ll;
2931 bp->b_wptr += size;
2932 } else {
2933 if (stp->sd_copyflag & STRCOPYCACHED)
2934 uiop->uio_extflg |= UIO_COPY_CACHED;
2936 if (size != 0) {
2937 error = uiomove(bp->b_wptr, size, UIO_WRITE,
2938 uiop);
2939 if (error != 0) {
2940 freeb(bp);
2941 freemsg(mp);
2942 return (error);
2945 bp->b_wptr += size;
2947 if (stp->sd_wputdatafunc != NULL) {
2948 mblk_t *newbp;
2950 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode,
2951 bp, NULL, NULL, NULL, NULL);
2952 if (newbp == NULL) {
2953 freeb(bp);
2954 freemsg(mp);
2955 return (ECOMM);
2957 bp = newbp;
2961 count -= size;
2963 if (mp == NULL)
2964 mp = bp;
2965 else
2966 linkb(mp, bp);
2967 } while (count > 0);
2969 *mpp = mp;
2970 return (0);
2974 * Wait for a buffer to become available. Return a non-zero errno
2975 * if unable to wait; return 0 if a buffer is probably available.
2978 strwaitbuf(size_t size, int pri)
2980 bufcall_id_t id;
2982 mutex_enter(&bcall_monitor);
2983 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast,
2984 &ttoproc(curthread)->p_flag_cv)) == 0) {
2985 mutex_exit(&bcall_monitor);
2986 return (ENOSR);
2988 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) {
2989 unbufcall(id);
2990 mutex_exit(&bcall_monitor);
2991 return (EINTR);
2993 unbufcall(id);
2994 mutex_exit(&bcall_monitor);
2995 return (0);
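
/*
 * Illustrative sketch of the driver-side analogue of strwaitbuf(): a put
 * or service procedure must not block, so when allocb() fails it can
 * register a bufcall to re-enable the queue once memory may be
 * available.  "xx_bufcall_id" is an assumed per-driver field, shown here
 * only for the example.
 */
static bufcall_id_t xx_bufcall_id;	/* hypothetical driver state */

static mblk_t *
xx_allocb_or_bufcall(queue_t *q, size_t size)
{
	mblk_t *bp;

	if ((bp = allocb(size, BPRI_MED)) == NULL) {
		/* qenable() will reschedule the service routine for q */
		xx_bufcall_id = bufcall(size, BPRI_MED,
		    (void (*)(void *))qenable, q);
	}
	return (bp);
}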
2999 * This function waits for a read or write event to happen on a stream.
3000 * fmode can specify FNDELAY and/or FNONBLOCK.
3001 * The timeout is in ms with -1 meaning infinite.
3002 * The flag values work as follows:
3003 * READWAIT Check for read side errors, send M_READ
3004 * GETWAIT Check for read side errors, no M_READ
3005 * WRITEWAIT Check for write side errors.
3006 * NOINTR Do not return error if nonblocking or timeout.
3007 * STR_NOERROR Ignore all errors except STPLEX.
3008 * STR_NOSIG Ignore/hold signals during the duration of the call.
3009 * STR_PEEK Pass the peek indication through to strgeterr().
3012 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout,
3013 int *done)
3015 int slpflg, errs;
3016 int error;
3017 kcondvar_t *sleepon;
3018 mblk_t *mp;
3019 ssize_t *rd_count;
3020 clock_t rval;
3022 ASSERT(MUTEX_HELD(&stp->sd_lock));
3023 if ((flag & READWAIT) || (flag & GETWAIT)) {
3024 slpflg = RSLEEP;
3025 sleepon = &_RD(stp->sd_wrq)->q_wait;
3026 errs = STRDERR|STPLEX;
3027 } else {
3028 slpflg = WSLEEP;
3029 sleepon = &stp->sd_wrq->q_wait;
3030 errs = STWRERR|STRHUP|STPLEX;
3032 if (flag & STR_NOERROR)
3033 errs = STPLEX;
3035 if (stp->sd_wakeq & slpflg) {
3037 * A strwakeq() is pending, no need to sleep.
3039 stp->sd_wakeq &= ~slpflg;
3040 *done = 0;
3041 return (0);
3044 if (stp->sd_flag & errs) {
3046 * Check for errors before going to sleep since the
3047 * caller might not have checked this while holding
3048 * sd_lock.
3050 error = strgeterr(stp, errs, (flag & STR_PEEK));
3051 if (error != 0) {
3052 *done = 1;
3053 return (error);
3058 * If any module downstream has requested read notification
3059 * by setting the SNDMREAD flag using M_SETOPTS, send a message
3060 * downstream.
3062 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) {
3063 mutex_exit(&stp->sd_lock);
3064 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED,
3065 (flag & STR_NOSIG), &error))) {
3066 mutex_enter(&stp->sd_lock);
3067 *done = 1;
3068 return (error);
3070 mp->b_datap->db_type = M_READ;
3071 rd_count = (ssize_t *)mp->b_wptr;
3072 *rd_count = count;
3073 mp->b_wptr += sizeof (ssize_t);
3075 * Send the number of bytes requested by the
3076 * read as the argument to M_READ.
3078 stream_willservice(stp);
3079 putnext(stp->sd_wrq, mp);
3080 stream_runservice(stp);
3081 mutex_enter(&stp->sd_lock);
3084 * If any data arrived due to inline processing
3085 * of putnext(), don't sleep.
3087 if (_RD(stp->sd_wrq)->q_first != NULL) {
3088 *done = 0;
3089 return (0);
3093 if (fmode & (FNDELAY|FNONBLOCK)) {
3094 if (!(flag & NOINTR))
3095 error = EAGAIN;
3096 else
3097 error = 0;
3098 *done = 1;
3099 return (error);
3102 stp->sd_flag |= slpflg;
3103 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2,
3104 "strwaitq sleeps (2):%p, %X, %lX, %X, %p",
3105 stp, flag, count, fmode, done);
3107 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG);
3108 if (rval > 0) {
3109 /* EMPTY */
3110 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2,
3111 "strwaitq awakes(2):%X, %X, %X, %X, %X",
3112 stp, flag, count, fmode, done);
3113 } else if (rval == 0) {
3114 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2,
3115 "strwaitq interrupt #2:%p, %X, %lX, %X, %p",
3116 stp, flag, count, fmode, done);
3117 stp->sd_flag &= ~slpflg;
3118 cv_broadcast(sleepon);
3119 if (!(flag & NOINTR))
3120 error = EINTR;
3121 else
3122 error = 0;
3123 *done = 1;
3124 return (error);
3125 } else {
3126 /* timeout */
3127 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME,
3128 "strwaitq timeout:%p, %X, %lX, %X, %p",
3129 stp, flag, count, fmode, done);
3130 *done = 1;
3131 if (!(flag & NOINTR))
3132 return (ETIME);
3133 else
3134 return (0);
3137 * If the caller implements delayed errors (i.e. queued after data),
3138 * we cannot check for errors here since data as well as an
3139 * error might have arrived at the stream head. We return to
3140 * have the caller check the read queue before checking for errors.
3142 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) {
3143 error = strgeterr(stp, errs, (flag & STR_PEEK));
3144 if (error != 0) {
3145 *done = 1;
3146 return (error);
3149 *done = 0;
3150 return (0);
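
/*
 * Illustrative sketch only (assumes the caller holds sd_lock, as
 * strwaitq() requires): a read-side loop that blocks with an infinite
 * timeout until data is queued at the stream head or strwaitq() says to
 * give up.
 */
static int
strwaitq_read_example(stdata_t *stp, ssize_t count, int fmode)
{
	int error = 0, done;

	ASSERT(MUTEX_HELD(&stp->sd_lock));
	while (_RD(stp->sd_wrq)->q_first == NULL) {
		error = strwaitq(stp, READWAIT, count, fmode, -1, &done);
		if (error != 0 || done)
			break;
	}
	return (error);
}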
3154 * Perform job control discipline access checks.
3155 * Return 0 for success and the errno for failure.
3158 #define cantsend(p, t, sig) \
3159 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig))
3162 straccess(struct stdata *stp, enum jcaccess mode)
3164 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */
3165 kthread_t *t = curthread;
3166 proc_t *p = ttoproc(t);
3167 sess_t *sp;
3169 ASSERT(mutex_owned(&stp->sd_lock));
3171 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO)
3172 return (0);
3174 mutex_enter(&p->p_lock); /* protects p_pgidp */
3176 for (;;) {
3177 mutex_enter(&p->p_splock); /* protects p->p_sessp */
3178 sp = p->p_sessp;
3179 mutex_enter(&sp->s_lock); /* protects sp->* */
3182 * If this is not the calling process's controlling terminal
3183 * or if the calling process is already in the foreground
3184 * then allow access.
3186 if (sp->s_dev != stp->sd_vnode->v_rdev ||
3187 p->p_pgidp == stp->sd_pgidp) {
3188 mutex_exit(&sp->s_lock);
3189 mutex_exit(&p->p_splock);
3190 mutex_exit(&p->p_lock);
3191 return (0);
3195 * Check to see if controlling terminal has been deallocated.
3197 if (sp->s_vp == NULL) {
3198 if (!cantsend(p, t, SIGHUP))
3199 sigtoproc(p, t, SIGHUP);
3200 mutex_exit(&sp->s_lock);
3201 mutex_exit(&p->p_splock);
3202 mutex_exit(&p->p_lock);
3203 return (EIO);
3206 mutex_exit(&sp->s_lock);
3207 mutex_exit(&p->p_splock);
3209 if (mode == JCGETP) {
3210 mutex_exit(&p->p_lock);
3211 return (0);
3214 if (mode == JCREAD) {
3215 if (p->p_detached || cantsend(p, t, SIGTTIN)) {
3216 mutex_exit(&p->p_lock);
3217 return (EIO);
3219 mutex_exit(&p->p_lock);
3220 mutex_exit(&stp->sd_lock);
3221 pgsignal(p->p_pgidp, SIGTTIN);
3222 mutex_enter(&stp->sd_lock);
3223 mutex_enter(&p->p_lock);
3224 } else { /* mode == JCWRITE or JCSETP */
3225 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) ||
3226 cantsend(p, t, SIGTTOU)) {
3227 mutex_exit(&p->p_lock);
3228 return (0);
3230 if (p->p_detached) {
3231 mutex_exit(&p->p_lock);
3232 return (EIO);
3234 mutex_exit(&p->p_lock);
3235 mutex_exit(&stp->sd_lock);
3236 pgsignal(p->p_pgidp, SIGTTOU);
3237 mutex_enter(&stp->sd_lock);
3238 mutex_enter(&p->p_lock);
3242 * We call cv_wait_sig_swap() to cause the appropriate
3243 * action for the job control signal to take place.
3244 * If the signal is being caught, we will take the
3245 * EINTR error return. Otherwise, the default action
3246 * of causing the process to stop will take place.
3247 * In this case, we rely on the periodic cv_broadcast() on
3248 * &lbolt_cv to wake us up to loop around and test again.
3249 * We can't get here if the signal is ignored or
3250 * if the current thread is blocking the signal.
3252 mutex_exit(&stp->sd_lock);
3253 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) {
3254 mutex_exit(&p->p_lock);
3255 mutex_enter(&stp->sd_lock);
3256 return (EINTR);
3258 mutex_exit(&p->p_lock);
3259 mutex_enter(&stp->sd_lock);
3260 mutex_enter(&p->p_lock);
3265 * Return the size of the message's leading blocks of type bp->b_datap->db_type.
3267 size_t
3268 xmsgsize(mblk_t *bp)
3270 unsigned char type;
3271 size_t count = 0;
3273 type = bp->b_datap->db_type;
3275 for (; bp; bp = bp->b_cont) {
3276 if (type != bp->b_datap->db_type)
3277 break;
3278 ASSERT(bp->b_wptr >= bp->b_rptr);
3279 count += bp->b_wptr - bp->b_rptr;
3281 return (count);
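
/*
 * Illustrative sketch: using xmsgsize() to check that the leading
 * M_PROTO blocks of a message carry at least a minimum header before
 * acting on it.  "hdrlen" is an assumed minimum chosen for the example;
 * note that msgdsize(9F), by contrast, counts only M_DATA bytes.
 */
static int
xmsgsize_example(mblk_t *mp, size_t hdrlen)
{
	if (DB_TYPE(mp) == M_PROTO && xmsgsize(mp) < hdrlen)
		return (EPROTO);
	return (0);
}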
3285 * Allocate a stream head.
3287 struct stdata *
3288 shalloc(queue_t *qp)
3290 stdata_t *stp;
3292 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP);
3294 stp->sd_wrq = _WR(qp);
3295 stp->sd_strtab = NULL;
3296 stp->sd_iocid = 0;
3297 stp->sd_mate = NULL;
3298 stp->sd_freezer = NULL;
3299 stp->sd_refcnt = 0;
3300 stp->sd_wakeq = 0;
3301 stp->sd_anchor = 0;
3302 stp->sd_struiowrq = NULL;
3303 stp->sd_struiordq = NULL;
3304 stp->sd_struiodnak = 0;
3305 stp->sd_struionak = NULL;
3306 stp->sd_t_audit_data = NULL;
3307 stp->sd_rput_opt = 0;
3308 stp->sd_wput_opt = 0;
3309 stp->sd_read_opt = 0;
3310 stp->sd_rprotofunc = strrput_proto;
3311 stp->sd_rmiscfunc = strrput_misc;
3312 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL;
3313 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL;
3314 stp->sd_ciputctrl = NULL;
3315 stp->sd_nciputctrl = 0;
3316 stp->sd_qhead = NULL;
3317 stp->sd_qtail = NULL;
3318 stp->sd_servid = NULL;
3319 stp->sd_nqueues = 0;
3320 stp->sd_svcflags = 0;
3321 stp->sd_copyflag = 0;
3323 return (stp);
3327 * Free a stream head.
3329 void
3330 shfree(stdata_t *stp)
3332 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
3334 stp->sd_wrq = NULL;
3336 mutex_enter(&stp->sd_qlock);
3337 while (stp->sd_svcflags & STRS_SCHEDULED) {
3338 STRSTAT(strwaits);
3339 cv_wait(&stp->sd_qcv, &stp->sd_qlock);
3341 mutex_exit(&stp->sd_qlock);
3343 if (stp->sd_ciputctrl != NULL) {
3344 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1);
3345 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl,
3346 stp->sd_nciputctrl, 0);
3347 ASSERT(ciputctrl_cache != NULL);
3348 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl);
3349 stp->sd_ciputctrl = NULL;
3350 stp->sd_nciputctrl = 0;
3352 ASSERT(stp->sd_qhead == NULL);
3353 ASSERT(stp->sd_qtail == NULL);
3354 ASSERT(stp->sd_nqueues == 0);
3355 kmem_cache_free(stream_head_cache, stp);
3359 * Allocate a pair of queues and a syncq for the pair
3361 queue_t *
3362 allocq(void)
3364 queinfo_t *qip;
3365 queue_t *qp, *wqp;
3366 syncq_t *sq;
3368 qip = kmem_cache_alloc(queue_cache, KM_SLEEP);
3370 qp = &qip->qu_rqueue;
3371 wqp = &qip->qu_wqueue;
3372 sq = &qip->qu_syncq;
3374 qp->q_last = NULL;
3375 qp->q_next = NULL;
3376 qp->q_ptr = NULL;
3377 qp->q_flag = QUSE | QREADR;
3378 qp->q_bandp = NULL;
3379 qp->q_stream = NULL;
3380 qp->q_syncq = sq;
3381 qp->q_nband = 0;
3382 qp->q_nfsrv = NULL;
3383 qp->q_draining = 0;
3384 qp->q_syncqmsgs = 0;
3385 qp->q_spri = 0;
3386 qp->q_qtstamp = 0;
3387 qp->q_sqtstamp = 0;
3388 qp->q_fp = NULL;
3390 wqp->q_last = NULL;
3391 wqp->q_next = NULL;
3392 wqp->q_ptr = NULL;
3393 wqp->q_flag = QUSE;
3394 wqp->q_bandp = NULL;
3395 wqp->q_stream = NULL;
3396 wqp->q_syncq = sq;
3397 wqp->q_nband = 0;
3398 wqp->q_nfsrv = NULL;
3399 wqp->q_draining = 0;
3400 wqp->q_syncqmsgs = 0;
3401 wqp->q_qtstamp = 0;
3402 wqp->q_sqtstamp = 0;
3403 wqp->q_spri = 0;
3405 sq->sq_count = 0;
3406 sq->sq_rmqcount = 0;
3407 sq->sq_flags = 0;
3408 sq->sq_type = 0;
3409 sq->sq_callbflags = 0;
3410 sq->sq_cancelid = 0;
3411 sq->sq_ciputctrl = NULL;
3412 sq->sq_nciputctrl = 0;
3413 sq->sq_needexcl = 0;
3414 sq->sq_svcflags = 0;
3416 return (qp);
3420 * Free a pair of queues and the "attached" syncq.
3421 * Discard any messages left on the syncq(s), remove the syncq(s) from the
3422 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq.
3424 void
3425 freeq(queue_t *qp)
3427 qband_t *qbp, *nqbp;
3428 syncq_t *sq, *outer;
3429 queue_t *wqp = _WR(qp);
3431 ASSERT(qp->q_flag & QREADR);
3434 * If a previously dispatched taskq job is scheduled to run
3435 * syncq_service() or a service routine is scheduled for the
3436 * queues about to be freed, wait here until all service is
3437 * done on the queue and all associated queues and syncqs.
3439 wait_svc(qp);
3441 (void) flush_syncq(qp->q_syncq, qp);
3442 (void) flush_syncq(wqp->q_syncq, wqp);
3443 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0);
3446 * Flush the queues before q_next is set to NULL. This is needed
3447 * in order to backenable any downstream queue before we go away.
3448 * Note: we are already removed from the stream so that the
3449 * backenabling will not cause any messages to be delivered to our
3450 * put procedures.
3452 flushq(qp, FLUSHALL);
3453 flushq(wqp, FLUSHALL);
3455 /* Tidy up - removeq only does a half-remove from stream */
3456 qp->q_next = wqp->q_next = NULL;
3457 ASSERT(!(qp->q_flag & QENAB));
3458 ASSERT(!(wqp->q_flag & QENAB));
3460 outer = qp->q_syncq->sq_outer;
3461 if (outer != NULL) {
3462 outer_remove(outer, qp->q_syncq);
3463 if (wqp->q_syncq != qp->q_syncq)
3464 outer_remove(outer, wqp->q_syncq);
3467 * Free any syncqs that are outside what allocq returned.
3469 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD))
3470 free_syncq(qp->q_syncq);
3471 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp))
3472 free_syncq(wqp->q_syncq);
3474 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3475 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3476 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
3477 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp)));
3478 sq = SQ(qp);
3479 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
3480 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
3481 ASSERT(sq->sq_outer == NULL);
3482 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
3483 ASSERT(sq->sq_callbpend == NULL);
3484 ASSERT(sq->sq_needexcl == 0);
3486 if (sq->sq_ciputctrl != NULL) {
3487 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
3488 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
3489 sq->sq_nciputctrl, 0);
3490 ASSERT(ciputctrl_cache != NULL);
3491 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
3492 sq->sq_ciputctrl = NULL;
3493 sq->sq_nciputctrl = 0;
3496 ASSERT(qp->q_first == NULL && wqp->q_first == NULL);
3497 ASSERT(qp->q_count == 0 && wqp->q_count == 0);
3498 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0);
3500 qp->q_flag &= ~QUSE;
3501 wqp->q_flag &= ~QUSE;
3503 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
3504 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */
3506 qbp = qp->q_bandp;
3507 while (qbp) {
3508 nqbp = qbp->qb_next;
3509 freeband(qbp);
3510 qbp = nqbp;
3512 qbp = wqp->q_bandp;
3513 while (qbp) {
3514 nqbp = qbp->qb_next;
3515 freeband(qbp);
3516 qbp = nqbp;
3518 kmem_cache_free(queue_cache, qp);
3522 * Allocate a qband structure.
3524 qband_t *
3525 allocband(void)
3527 qband_t *qbp;
3529 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP);
3530 if (qbp == NULL)
3531 return (NULL);
3533 qbp->qb_next = NULL;
3534 qbp->qb_count = 0;
3535 qbp->qb_mblkcnt = 0;
3536 qbp->qb_first = NULL;
3537 qbp->qb_last = NULL;
3538 qbp->qb_flag = 0;
3540 return (qbp);
3544 * Free a qband structure.
3546 void
3547 freeband(qband_t *qbp)
3549 kmem_cache_free(qband_cache, qbp);
3553 * Just like putnextctl(9F), except that allocb_wait() is used.
3555 * Consolidation Private, and of course only callable from the stream head or
3556 * routines that may block.
3559 putnextctl_wait(queue_t *q, int type)
3561 mblk_t *bp;
3562 int error;
3564 if ((datamsg(type) && (type != M_DELAY)) ||
3565 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL)
3566 return (0);
3568 bp->b_datap->db_type = (unsigned char)type;
3569 putnext(q, bp);
3570 return (1);
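
/*
 * Illustrative usage sketch (a hypothetical module, blocking context):
 * resume a stopped tty-style stream by sending a zero-length M_START.
 * M_START is not a data message, so putnextctl_wait() accepts it.
 */
static int
xx_resume_output(queue_t *wq)
{
	return (putnextctl_wait(wq, M_START));
}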
3574 * Run any possible bufcalls.
3576 void
3577 runbufcalls(void)
3579 strbufcall_t *bcp;
3581 mutex_enter(&bcall_monitor);
3582 mutex_enter(&strbcall_lock);
3584 if (strbcalls.bc_head) {
3585 size_t count;
3586 int nevent;
3589 * Count how many events are on the list
3590 * now so we can check to avoid looping
3591 * in low-memory situations.
3593 nevent = 0;
3594 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next)
3595 nevent++;
3598 * Get an estimate of available memory from kmem_avail(),
3599 * wake all bufcall functions waiting for
3600 * memory whose request could be satisfied
3601 * by 'count' bytes, and let 'em fight for it.
3603 count = kmem_avail();
3604 while ((bcp = strbcalls.bc_head) != NULL && nevent) {
3605 STRSTAT(bufcalls);
3606 --nevent;
3607 if (bcp->bc_size <= count) {
3608 bcp->bc_executor = curthread;
3609 mutex_exit(&strbcall_lock);
3610 (*bcp->bc_func)(bcp->bc_arg);
3611 mutex_enter(&strbcall_lock);
3612 bcp->bc_executor = NULL;
3613 cv_broadcast(&bcall_cv);
3614 strbcalls.bc_head = bcp->bc_next;
3615 kmem_free(bcp, sizeof (strbufcall_t));
3616 } else {
3618 * too big, try again later - note
3619 * that nevent was decremented above
3620 * so we won't retry this one on this
3621 * iteration of the loop
3623 if (bcp->bc_next != NULL) {
3624 strbcalls.bc_head = bcp->bc_next;
3625 bcp->bc_next = NULL;
3626 strbcalls.bc_tail->bc_next = bcp;
3627 strbcalls.bc_tail = bcp;
3631 if (strbcalls.bc_head == NULL)
3632 strbcalls.bc_tail = NULL;
3635 mutex_exit(&strbcall_lock);
3636 mutex_exit(&bcall_monitor);
3641 * Actually run queue's service routine.
3643 static void
3644 runservice(queue_t *q)
3646 qband_t *qbp;
3648 ASSERT(q->q_qinfo->qi_srvp);
3649 again:
3650 entersq(q->q_syncq, SQ_SVC);
3651 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START,
3652 "runservice starts:%p", q);
3654 if (!(q->q_flag & QWCLOSE))
3655 (*q->q_qinfo->qi_srvp)(q);
3657 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END,
3658 "runservice ends:(%p)", q);
3660 leavesq(q->q_syncq, SQ_SVC);
3662 mutex_enter(QLOCK(q));
3663 if (q->q_flag & QENAB) {
3664 q->q_flag &= ~QENAB;
3665 mutex_exit(QLOCK(q));
3666 goto again;
3668 q->q_flag &= ~QINSERVICE;
3669 q->q_flag &= ~QBACK;
3670 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next)
3671 qbp->qb_flag &= ~QB_BACK;
3673 * Wake up threads waiting for the service procedure
3674 * to be run (strclose and qdetach).
3676 cv_broadcast(&q->q_wait);
3678 mutex_exit(QLOCK(q));
3682 * Background processing of bufcalls.
3684 void
3685 streams_bufcall_service(void)
3687 callb_cpr_t cprinfo;
3689 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr,
3690 "streams_bufcall_service");
3692 mutex_enter(&strbcall_lock);
3694 for (;;) {
3695 if (strbcalls.bc_head != NULL && kmem_avail() > 0) {
3696 mutex_exit(&strbcall_lock);
3697 runbufcalls();
3698 mutex_enter(&strbcall_lock);
3700 if (strbcalls.bc_head != NULL) {
3701 STRSTAT(bcwaits);
3702 /* Wait for memory to become available */
3703 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3704 (void) cv_reltimedwait(&memavail_cv, &strbcall_lock,
3705 SEC_TO_TICK(60), TR_CLOCK_TICK);
3706 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3709 /* Wait for new work to arrive */
3710 if (strbcalls.bc_head == NULL) {
3711 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3712 cv_wait(&strbcall_cv, &strbcall_lock);
3713 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3719 * Background processing of streams background tasks which failed
3720 * taskq_dispatch.
3722 static void
3723 streams_qbkgrnd_service(void)
3725 callb_cpr_t cprinfo;
3726 queue_t *q;
3728 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3729 "streams_bkgrnd_service");
3731 mutex_enter(&service_queue);
3733 for (;;) {
3735 * Wait for work to arrive.
3737 while ((freebs_list == NULL) && (qhead == NULL)) {
3738 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3739 cv_wait(&services_to_run, &service_queue);
3740 CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3743 * Handle all pending freebs requests to free memory.
3745 while (freebs_list != NULL) {
3746 mblk_t *mp = freebs_list;
3747 freebs_list = mp->b_next;
3748 mutex_exit(&service_queue);
3749 mblk_free(mp);
3750 mutex_enter(&service_queue);
3753 * Run pending queues.
3755 while (qhead != NULL) {
3756 DQ(q, qhead, qtail, q_link);
3757 ASSERT(q != NULL);
3758 mutex_exit(&service_queue);
3759 queue_service(q);
3760 mutex_enter(&service_queue);
3762 ASSERT(qhead == NULL && qtail == NULL);
3767 * Background processing of streams background tasks which failed
3768 * taskq_dispatch.
3770 static void
3771 streams_sqbkgrnd_service(void)
3773 callb_cpr_t cprinfo;
3774 syncq_t *sq;
3776 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3777 "streams_sqbkgrnd_service");
3779 mutex_enter(&service_queue);
3781 for (;;) {
3783 * Wait for work to arrive.
3785 while (sqhead == NULL) {
3786 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3787 cv_wait(&syncqs_to_run, &service_queue);
3788 CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3792 * Run pending syncqs.
3794 while (sqhead != NULL) {
3795 DQ(sq, sqhead, sqtail, sq_next);
3796 ASSERT(sq != NULL);
3797 ASSERT(sq->sq_svcflags & SQ_BGTHREAD);
3798 mutex_exit(&service_queue);
3799 syncq_service(sq);
3800 mutex_enter(&service_queue);
3806 * Disable the syncq and wait for background syncq processing to complete.
3807 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the
3808 * list.
3810 void
3811 wait_sq_svc(syncq_t *sq)
3813 mutex_enter(SQLOCK(sq));
3814 sq->sq_svcflags |= SQ_DISABLED;
3815 if (sq->sq_svcflags & SQ_BGTHREAD) {
3816 syncq_t *sq_chase;
3817 syncq_t *sq_curr;
3818 int removed;
3820 ASSERT(sq->sq_servcount == 1);
3821 mutex_enter(&service_queue);
3822 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
3823 mutex_exit(&service_queue);
3824 if (removed) {
3825 sq->sq_svcflags &= ~SQ_BGTHREAD;
3826 sq->sq_servcount = 0;
3827 STRSTAT(sqremoved);
3828 goto done;
3831 while (sq->sq_servcount != 0) {
3832 sq->sq_flags |= SQ_WANTWAKEUP;
3833 cv_wait(&sq->sq_wait, SQLOCK(sq));
3835 done:
3836 mutex_exit(SQLOCK(sq));
3840 * Put a syncq on the list of syncqs to be serviced by the sqthread.
3841 * Add the argument to the end of the sqhead list and set the flag
3842 * indicating this syncq has been enabled. If it has already been
3843 * enabled, don't do anything.
3844 * This routine assumes that SQLOCK is held.
3845 * NOTE that the lock order is to have the SQLOCK first,
3846 * so if the service_syncq lock is held, we need to release it
3847 * before acquiring the SQLOCK (mostly relevant for the background
3848 * thread, and this seems to be common among the STREAMS global locks).
3849 * Note that the sq_svcflags are protected by the SQLOCK.
3851 void
3852 sqenable(syncq_t *sq)
3855 * This is probably not important except for where I believe it
3856 * is being called. At that point, it should be held (and it
3857 * is a pain to release it just for this routine, so don't do
3858 * it).
3860 ASSERT(MUTEX_HELD(SQLOCK(sq)));
3862 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
3863 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);
3866 * Do not put on list if background thread is scheduled or
3867 * syncq is disabled.
3869 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
3870 return;
3873 * Check whether we should enable sq at all.
3874 * Non-PERMOD syncqs may be drained by at most one thread.
3875 * PERMOD syncqs may be drained by several threads but we limit the
3876 * total amount to the lesser of
3877 * the number of queues on the syncq and
3878 * the number of CPUs.
3880 if (sq->sq_servcount != 0) {
3881 if (((sq->sq_type & SQ_PERMOD) == 0) ||
3882 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
3883 STRSTAT(sqtoomany);
3884 return;
3888 sq->sq_tstamp = ddi_get_lbolt();
3889 STRSTAT(sqenables);
3891 /* Attempt a taskq dispatch */
3892 sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
3893 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
3894 if (sq->sq_servid != NULL) {
3895 sq->sq_servcount++;
3896 return;
3900 * This taskq dispatch failed, but a previous one may have succeeded.
3901 * Don't try to schedule on the background thread whilst there is
3902 * outstanding taskq processing.
3904 if (sq->sq_servcount != 0)
3905 return;
3908 * System is low on resources and can't perform a non-sleeping
3909 * dispatch. Schedule the syncq for a background thread and mark the
3910 * syncq to avoid any further taskq dispatch attempts.
3912 mutex_enter(&service_queue);
3913 STRSTAT(taskqfails);
3914 ENQUEUE(sq, sqhead, sqtail, sq_next);
3915 sq->sq_svcflags |= SQ_BGTHREAD;
3916 sq->sq_servcount = 1;
3917 cv_signal(&syncqs_to_run);
3918 mutex_exit(&service_queue);
3922 * Note: fifo_close() depends on the mblk_t on the queue being freed
3923 * asynchronously. The asynchronous freeing of messages breaks the
3924 * recursive call chain of fifo_close() while there are I_SENDFD type of
3925 * messages referring to other file pointers on the queue. Then when
3926 * closing pipes it can avoid stack overflow in case of daisy-chained
3927 * pipes, and also avoid deadlock in case of fifonode_t pairs (which
3928 * share the same fifolock_t).
3930 * No need to kpreempt_disable to access cpu_seqid. If we migrate and
3931 * the esb queue does not match the new CPU, that is OK.
3933 void
3934 freebs_enqueue(mblk_t *mp, dblk_t *dbp)
3936 int qindex = CPU->cpu_seqid >> esbq_log2_cpus_per_q;
3937 esb_queue_t *eqp;
3939 ASSERT(dbp->db_mblk == mp);
3940 ASSERT(qindex < esbq_nelem);
3942 eqp = system_esbq_array;
3943 if (eqp != NULL) {
3944 eqp += qindex;
3945 } else {
3946 mutex_enter(&esbq_lock);
3947 if (kmem_ready && system_esbq_array == NULL)
3948 system_esbq_array = (esb_queue_t *)kmem_zalloc(
3949 esbq_nelem * sizeof (esb_queue_t), KM_NOSLEEP);
3950 mutex_exit(&esbq_lock);
3951 eqp = system_esbq_array;
3952 if (eqp != NULL)
3953 eqp += qindex;
3954 else
3955 eqp = &system_esbq;
3959 * Check data sanity. The dblock should have a non-empty free function.
3960 * It is better to panic here than later when the dblock is freed
3961 * asynchronously and the context is lost.
3963 if (dbp->db_frtnp->free_func == NULL) {
3964 panic("freebs_enqueue: dblock %p has a NULL free callback",
3965 (void *)dbp);
3968 mutex_enter(&eqp->eq_lock);
3969 /* queue the new mblk on the esballoc queue */
3970 if (eqp->eq_head == NULL) {
3971 eqp->eq_head = eqp->eq_tail = mp;
3972 } else {
3973 eqp->eq_tail->b_next = mp;
3974 eqp->eq_tail = mp;
3976 eqp->eq_len++;
3978 /* If we're the first thread to reach the threshold, process */
3979 if (eqp->eq_len >= esbq_max_qlen &&
3980 !(eqp->eq_flags & ESBQ_PROCESSING))
3981 esballoc_process_queue(eqp);
3983 esballoc_set_timer(eqp, esbq_timeout);
3984 mutex_exit(&eqp->eq_lock);
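
/*
 * Illustrative sketch of the esballoc() usage this path services: a
 * hypothetical driver wrapping an externally owned buffer with a free
 * routine.  When the last reference is freed in a context that must not
 * run the callback synchronously, the mblk is queued above and the free
 * routine runs later.  The caller is assumed to have set frp->free_func
 * and frp->free_arg, and frp must outlive the message.
 */
static mblk_t *
xx_wrap_extern_buf(uchar_t *base, size_t size, frtn_t *frp)
{
	return (esballoc(base, size, BPRI_MED, frp));
}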
3987 static void
3988 esballoc_process_queue(esb_queue_t *eqp)
3990 mblk_t *mp;
3992 ASSERT(MUTEX_HELD(&eqp->eq_lock));
3994 eqp->eq_flags |= ESBQ_PROCESSING;
3996 do {
3998 * Detach the message chain for processing.
4000 mp = eqp->eq_head;
4001 eqp->eq_tail->b_next = NULL;
4002 eqp->eq_head = eqp->eq_tail = NULL;
4003 eqp->eq_len = 0;
4004 mutex_exit(&eqp->eq_lock);
4007 * Process the message chain.
4009 esballoc_enqueue_mblk(mp);
4010 mutex_enter(&eqp->eq_lock);
4011 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0));
4013 eqp->eq_flags &= ~ESBQ_PROCESSING;
4017 * taskq callback routine to free esballoc'ed mblks
4019 static void
4020 esballoc_mblk_free(mblk_t *mp)
4022 mblk_t *nextmp;
4024 for (; mp != NULL; mp = nextmp) {
4025 nextmp = mp->b_next;
4026 mp->b_next = NULL;
4027 mblk_free(mp);
4031 static void
4032 esballoc_enqueue_mblk(mblk_t *mp)
4035 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
4036 TQ_NOSLEEP) == NULL) {
4037 mblk_t *first_mp = mp;
4039 * System is low on resources and can't perform a non-sleeping
4040 * dispatch. Schedule for a background thread.
4042 mutex_enter(&service_queue);
4043 STRSTAT(taskqfails);
4045 while (mp->b_next != NULL)
4046 mp = mp->b_next;
4048 mp->b_next = freebs_list;
4049 freebs_list = first_mp;
4050 cv_signal(&services_to_run);
4051 mutex_exit(&service_queue);
4055 static void
4056 esballoc_timer(void *arg)
4058 esb_queue_t *eqp = arg;
4060 mutex_enter(&eqp->eq_lock);
4061 eqp->eq_flags &= ~ESBQ_TIMER;
4063 if (!(eqp->eq_flags & ESBQ_PROCESSING) &&
4064 eqp->eq_len > 0)
4065 esballoc_process_queue(eqp);
4067 esballoc_set_timer(eqp, esbq_timeout);
4068 mutex_exit(&eqp->eq_lock);
4071 static void
4072 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout)
4074 ASSERT(MUTEX_HELD(&eqp->eq_lock));
4076 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) {
4077 (void) timeout(esballoc_timer, eqp, eq_timeout);
4078 eqp->eq_flags |= ESBQ_TIMER;
4083 * Set up the esbq array length based upon NCPU scaled by CPUs per
4084 * queue. Use the static system_esbq until kmem_ready is set and we can
4085 * create an array in freebs_enqueue().
4087 void
4088 esballoc_queue_init(void)
4090 esbq_log2_cpus_per_q = highbit(esbq_cpus_per_q - 1);
4091 esbq_cpus_per_q = 1 << esbq_log2_cpus_per_q;
4092 esbq_nelem = howmany(NCPU, esbq_cpus_per_q);
4093 system_esbq.eq_len = 0;
4094 system_esbq.eq_head = system_esbq.eq_tail = NULL;
4095 system_esbq.eq_flags = 0;
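
/*
 * Worked example of the sizing above (assumptions: NCPU = 64 and
 * esbq_cpus_per_q = 4 on entry): highbit(4 - 1) == 2, so
 * esbq_log2_cpus_per_q becomes 2, esbq_cpus_per_q is rounded to
 * 1 << 2 = 4, and esbq_nelem = howmany(64, 4) = 16 queues.
 */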
4099 * Set the QBACK or QB_BACK flag in the given queue for
4100 * the given priority band.
4102 void
4103 setqback(queue_t *q, unsigned char pri)
4105 int i;
4106 qband_t *qbp;
4107 qband_t **qbpp;
4109 ASSERT(MUTEX_HELD(QLOCK(q)));
4110 if (pri != 0) {
4111 if (pri > q->q_nband) {
4112 qbpp = &q->q_bandp;
4113 while (*qbpp)
4114 qbpp = &(*qbpp)->qb_next;
4115 while (pri > q->q_nband) {
4116 if ((*qbpp = allocband()) == NULL) {
4117 cmn_err(CE_WARN,
4118 "setqback: can't allocate qband\n");
4119 return;
4121 (*qbpp)->qb_hiwat = q->q_hiwat;
4122 (*qbpp)->qb_lowat = q->q_lowat;
4123 q->q_nband++;
4124 qbpp = &(*qbpp)->qb_next;
4127 qbp = q->q_bandp;
4128 i = pri;
4129 while (--i)
4130 qbp = qbp->qb_next;
4131 qbp->qb_flag |= QB_BACK;
4132 } else {
4133 q->q_flag |= QBACK;
4138 strcopyin(void *from, void *to, size_t len, int copyflag)
4140 if (copyflag & U_TO_K) {
4141 ASSERT((copyflag & K_TO_K) == 0);
4142 if (copyin(from, to, len))
4143 return (EFAULT);
4144 } else {
4145 ASSERT(copyflag & K_TO_K);
4146 bcopy(from, to, len);
4148 return (0);
4152 strcopyout(void *from, void *to, size_t len, int copyflag)
4154 if (copyflag & U_TO_K) {
4155 if (copyout(from, to, len))
4156 return (EFAULT);
4157 } else {
4158 ASSERT(copyflag & K_TO_K);
4159 bcopy(from, to, len);
4161 return (0);
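
/*
 * Illustrative sketch: an ioctl helper that works for both user-
 * originated (U_TO_K) and kernel-originated (K_TO_K) ioctls by passing
 * the copyflag through unchanged.  "xx_get_int_arg" is hypothetical.
 */
static int
xx_get_int_arg(void *arg, int *valp, int copyflag)
{
	return (strcopyin(arg, valp, sizeof (int), copyflag));
}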
4165 * strsignal_nolock() posts a signal to the process(es) at the stream head.
4166 * It assumes that the stream head lock is already held, whereas strsignal()
4167 * acquires the lock first. This routine was created because a few callers
4168 * release the stream head lock before calling it, only to re-acquire
4169 * it after it returns.
4171 void
4172 strsignal_nolock(stdata_t *stp, int sig, uchar_t band)
4174 ASSERT(MUTEX_HELD(&stp->sd_lock));
4175 switch (sig) {
4176 case SIGPOLL:
4177 if (stp->sd_sigflags & S_MSG)
4178 strsendsig(stp->sd_siglist, S_MSG, band, 0);
4179 break;
4180 default:
4181 if (stp->sd_pgidp)
4182 pgsignal(stp->sd_pgidp, sig);
4183 break;
4187 void
4188 strsignal(stdata_t *stp, int sig, int32_t band)
4190 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG,
4191 "strsignal:%p, %X, %X", stp, sig, band);
4193 mutex_enter(&stp->sd_lock);
4194 switch (sig) {
4195 case SIGPOLL:
4196 if (stp->sd_sigflags & S_MSG)
4197 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
4198 break;
4200 default:
4201 if (stp->sd_pgidp) {
4202 pgsignal(stp->sd_pgidp, sig);
4204 break;
4206 mutex_exit(&stp->sd_lock);
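
/*
 * Illustrative sketch: how a driver might post SIGPOLL to the stream
 * head.  The M_PCSIG message is consumed by the stream head read side,
 * which ends up in strsignal() above; the single data byte carries the
 * signal number.
 */
static void
xx_post_sigpoll(queue_t *rq)
{
	mblk_t *mp;

	if ((mp = allocb(1, BPRI_HI)) != NULL) {
		mp->b_datap->db_type = M_PCSIG;
		*mp->b_wptr++ = SIGPOLL;
		putnext(rq, mp);
	}
}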
4209 void
4210 strhup(stdata_t *stp)
4212 ASSERT(mutex_owned(&stp->sd_lock));
4213 pollwakeup(&stp->sd_pollist, POLLHUP);
4214 if (stp->sd_sigflags & S_HANGUP)
4215 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0);
4219 * Backenable the first queue upstream from `q' with a service procedure.
4221 void
4222 backenable(queue_t *q, uchar_t pri)
4224 queue_t *nq;
4227 * Our presence might not prevent other modules in our own
4228 * stream from popping/pushing since the caller of getq might not
4229 * have a claim on the queue (some drivers do a getq on somebody
4230 * else's queue - they know that the queue itself is not going away
4231 * but the framework has to guarantee q_next in that stream).
4233 claimstr(q);
4235 /* Find nearest back queue with service proc */
4236 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
4237 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
4240 if (nq) {
4241 kthread_t *freezer;
4243 * backenable can be called either with no locks held
4244 * or with the stream frozen (the latter occurs when a module
4245 * calls rmvq with the stream frozen). If the stream is frozen
4246  * by the caller, the caller will hold all qlocks in the stream.
4247 * Note that a frozen stream doesn't freeze a mated stream,
4248 * so we explicitly check for that.
4250 freezer = STREAM(q)->sd_freezer;
4251 if (freezer != curthread || STREAM(q) != STREAM(nq)) {
4252 mutex_enter(QLOCK(nq));
4254 #ifdef DEBUG
4255 else {
4256 ASSERT(frozenstr(q));
4257 ASSERT(MUTEX_HELD(QLOCK(q)));
4258 ASSERT(MUTEX_HELD(QLOCK(nq)));
4260 #endif
4261 setqback(nq, pri);
4262 qenable_locked(nq);
4263 if (freezer != curthread || STREAM(q) != STREAM(nq))
4264 mutex_exit(QLOCK(nq));
4266 releasestr(q);
4270 * Return the appropriate errno when one of flags_to_check is set
4271 * in sd_flags. Uses the exported error routines if they are set.
4272  * Will return 0 if no error is set (or if the exported error routines
4273 * do not return an error).
4275 * If there is both a read and write error to check, we prefer the read error.
4276  * Also, give preference to recorded errnos over the error functions.
4277 * The flags that are handled are:
4278 * STPLEX return EINVAL
4279 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST)
4280 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST)
4281 * STRHUP return sd_werror
4283 * If the caller indicates that the operation is a peek, a nonpersistent error
4284 * is not cleared.
4287 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
4289 int32_t sd_flag = stp->sd_flag & flags_to_check;
4290 int error = 0;
4292 ASSERT(MUTEX_HELD(&stp->sd_lock));
4293 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
4294 if (sd_flag & STPLEX)
4295 error = EINVAL;
4296 else if (sd_flag & STRDERR) {
4297 error = stp->sd_rerror;
4298 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
4300  * Read errors are non-persistent, i.e. discarded once
4301  * returned to a non-peeking caller.
4303 stp->sd_rerror = 0;
4304 stp->sd_flag &= ~STRDERR;
4306 if (error == 0 && stp->sd_rderrfunc != NULL) {
4307 int clearerr = 0;
4309 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
4310 &clearerr);
4311 if (clearerr) {
4312 stp->sd_flag &= ~STRDERR;
4313 stp->sd_rderrfunc = NULL;
4316 } else if (sd_flag & STWRERR) {
4317 error = stp->sd_werror;
4318 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
4320  * Write errors are non-persistent, i.e. discarded once
4321  * returned to a non-peeking caller.
4323 stp->sd_werror = 0;
4324 stp->sd_flag &= ~STWRERR;
4326 if (error == 0 && stp->sd_wrerrfunc != NULL) {
4327 int clearerr = 0;
4329 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
4330 &clearerr);
4331 if (clearerr) {
4332 stp->sd_flag &= ~STWRERR;
4333 stp->sd_wrerrfunc = NULL;
4336 } else if (sd_flag & STRHUP) {
4337 /* sd_werror set when STRHUP */
4338 error = stp->sd_werror;
4340 return (error);
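/*
 * Caller-pattern sketch (illustrative fragment only): strgeterr() must
 * be called with sd_lock held, typically after one of the checked flags
 * has been observed in sd_flag:
 */
#if 0
	mutex_enter(&stp->sd_lock);
	if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX))
		error = strgeterr(stp, STRDERR|STWRERR|STRHUP|STPLEX, 0);
	mutex_exit(&stp->sd_lock);
#endif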
4345  * Single-thread open/close/push/pop operations;
4346  * this applies to twisted (mated) streams as well.
4349 strstartplumb(stdata_t *stp, int flag, int cmd)
4351 int waited = 1;
4352 int error = 0;
4354 if (STRMATED(stp)) {
4355 struct stdata *stmatep = stp->sd_mate;
4357 STRLOCKMATES(stp);
4358 while (waited) {
4359 waited = 0;
4360 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4361 if ((cmd == I_POP) &&
4362 (flag & (FNDELAY|FNONBLOCK))) {
4363 STRUNLOCKMATES(stp);
4364 return (EAGAIN);
4366 waited = 1;
4367 mutex_exit(&stp->sd_lock);
4368 if (!cv_wait_sig(&stmatep->sd_monitor,
4369 &stmatep->sd_lock)) {
4370 mutex_exit(&stmatep->sd_lock);
4371 return (EINTR);
4373 mutex_exit(&stmatep->sd_lock);
4374 STRLOCKMATES(stp);
4376 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4377 if ((cmd == I_POP) &&
4378 (flag & (FNDELAY|FNONBLOCK))) {
4379 STRUNLOCKMATES(stp);
4380 return (EAGAIN);
4382 waited = 1;
4383 mutex_exit(&stmatep->sd_lock);
4384 if (!cv_wait_sig(&stp->sd_monitor,
4385 &stp->sd_lock)) {
4386 mutex_exit(&stp->sd_lock);
4387 return (EINTR);
4389 mutex_exit(&stp->sd_lock);
4390 STRLOCKMATES(stp);
4392 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4393 error = strgeterr(stp,
4394 STRDERR|STWRERR|STRHUP|STPLEX, 0);
4395 if (error != 0) {
4396 STRUNLOCKMATES(stp);
4397 return (error);
4401 stp->sd_flag |= STRPLUMB;
4402 STRUNLOCKMATES(stp);
4403 } else {
4404 mutex_enter(&stp->sd_lock);
4405 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4406 if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
4407 (flag & (FNDELAY|FNONBLOCK))) {
4408 mutex_exit(&stp->sd_lock);
4409 return (EAGAIN);
4411 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
4412 mutex_exit(&stp->sd_lock);
4413 return (EINTR);
4415 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4416 error = strgeterr(stp,
4417 STRDERR|STWRERR|STRHUP|STPLEX, 0);
4418 if (error != 0) {
4419 mutex_exit(&stp->sd_lock);
4420 return (error);
4424 stp->sd_flag |= STRPLUMB;
4425 mutex_exit(&stp->sd_lock);
4427 return (0);
4431 * Complete the plumbing operation associated with stream `stp'.
4433 void
4434 strendplumb(stdata_t *stp)
4436 ASSERT(MUTEX_HELD(&stp->sd_lock));
4437 ASSERT(stp->sd_flag & STRPLUMB);
4438 stp->sd_flag &= ~STRPLUMB;
4439 cv_broadcast(&stp->sd_monitor);
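/*
 * Pairing sketch (illustrative fragment only): a plumbing operation
 * brackets its work with strstartplumb()/strendplumb(), e.g. for an
 * I_PUSH-style command:
 */
#if 0
	if ((error = strstartplumb(stp, flag, cmd)) != 0)
		return (error);
	/* ... perform the plumbing change (e.g. insertq/removeq) ... */
	mutex_enter(&stp->sd_lock);
	strendplumb(stp);	/* clears STRPLUMB and wakes any waiters */
	mutex_exit(&stp->sd_lock);
#endif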
4443 * This describes how the STREAMS framework handles synchronization
4444 * during open/push and close/pop.
4445 * The key interfaces for open and close are qprocson and qprocsoff,
4446  * respectively. While the close case in general is harder, both open
4447  * and close have significant similarities.
4449 * During close the STREAMS framework has to both ensure that there
4450 * are no stale references to the queue pair (and syncq) that
4451 * are being closed and also provide the guarantees that are documented
4452 * in qprocsoff(9F).
4453 * If there are stale references to the queue that is closing it can
4454 * result in kernel memory corruption or kernel panics.
4456  * Note that it is up to the module/driver to ensure that it itself
4457 * does not have any stale references to the closing queues once its close
4458 * routine returns. This includes:
4459 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
4460  *   associated with the queues. For timeout and bufcall callbacks the
4461  *   module/driver also has to wait for any callbacks that are still
4462  *   in progress to complete.
4463 * - If the module/driver is using esballoc it has to ensure that any
4464 * esballoc free functions do not refer to a queue that has closed.
4465 * (Note that in general the close routine can not wait for the esballoc'ed
4466 * messages to be freed since that can cause a deadlock.)
4467 * - Cancelling any interrupts that refer to the closing queues and
4468 * also ensuring that there are no interrupts in progress that will
4469 * refer to the closing queues once the close routine returns.
4470 * - For multiplexors removing any driver global state that refers to
4471 * the closing queue and also ensuring that there are no threads in
4472  *   the multiplexor that have picked up a queue pointer but not yet
4473 * finished using it.
4475 * In addition, a driver/module can only reference the q_next pointer
4476 * in its open, close, put, or service procedures or in a
4477 * qtimeout/qbufcall callback procedure executing "on" the correct
4478 * stream. Thus it can not reference the q_next pointer in an interrupt
4479 * routine or a timeout, bufcall or esballoc callback routine. Likewise
4480 * it can not reference q_next of a different queue e.g. in a mux that
4481  * passes messages from one queue's put/service procedure to another queue.
4482 * In all the cases when the driver/module can not access the q_next
4483 * field it must use the *next* versions e.g. canputnext instead of
4484 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
4487 * Assuming that the driver/module conforms to the above constraints
4488 * the STREAMS framework has to avoid stale references to q_next for all
4489 * the framework internal cases which include (but are not limited to):
4490 * - Threads in canput/canputnext/backenable and elsewhere that are
4491 * walking q_next.
4492 * - Messages on a syncq that have a reference to the queue through b_queue.
4493 * - Messages on an outer perimeter (syncq) that have a reference to the
4494 * queue through b_queue.
4495 * - Threads that use q_nfsrv (e.g. canput) to find a queue.
4496 * Note that only canput and bcanput use q_nfsrv without any locking.
4498  * To provide the qprocsoff(9F) guarantees, the STREAMS framework has to
4499  * ensure that, after qprocsoff returns, no threads can enter the put or
4500  * service routines for the closing read or write-side queue.
4501 * In addition to preventing "direct" entry into the put procedures
4502 * the framework also has to prevent messages being drained from
4503 * the syncq or the outer perimeter.
4504  * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
4505 * mechanism to prevent qwriter(PERIM_OUTER) from running after
4506 * qprocsoff has returned.
4507 * Note that if a module/driver uses put(9F) on one of its own queues
4508 * it is up to the module/driver to ensure that the put() doesn't
4509 * get called when the queue is closing.
4512  * The framework aspects of the above "contract" are implemented by
4513 * qprocsoff, removeq, and strlock:
4514 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
4515 * entering the service procedures.
4516 * - strlock acquires the sd_lock and sd_reflock to prevent putnext,
4517 * canputnext, backenable etc from dereferencing the q_next that will
4518 * soon change.
4519 * - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
4520 * or other q_next walker that uses claimstr/releasestr to finish.
4521 * - optionally for every syncq in the stream strlock acquires all the
4522 * sq_lock's and waits for all sq_counts to drop to a value that indicates
4523 * that no thread executes in the put or service procedures and that no
4524 * thread is draining into the module/driver. This ensures that no
4525 * open, close, put, service, or qtimeout/qbufcall callback procedure is
4526 * currently executing hence no such thread can end up with the old stale
4527 * q_next value and no canput/backenable can have the old stale
4528 * q_nfsrv/q_next.
4529 * - qdetach (wait_svc) makes sure that any scheduled or running threads
4530 * have either finished or observed the QWCLOSE flag and gone away.
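/*
 * Sketch of a module close routine honoring the above contract
 * (illustrative only; the soft-state structure and the timeout/bufcall
 * ids are hypothetical):
 */
#if 0
static int
examplemodclose(queue_t *q, int flag, cred_t *crp)
{
	struct example_state *exp = q->q_ptr;

	qprocsoff(q);			/* no new put/service entries */
	if (exp->ex_tid != 0)		/* cancel pending qtimeout() */
		(void) quntimeout(q, exp->ex_tid);
	if (exp->ex_bid != 0)		/* cancel pending qbufcall() */
		qunbufcall(q, exp->ex_bid);
	q->q_ptr = WR(q)->q_ptr = NULL;
	kmem_free(exp, sizeof (*exp));
	return (0);
}
#endif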
4535 * Get all the locks necessary to change q_next.
4537 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
4538 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
4539 * the only threads inside the syncq are threads currently calling removeq().
4540 * Since threads calling removeq() are in the process of removing their queues
4541 * from the stream, we do not need to worry about them accessing a stale q_next
4542 * pointer and thus we do not need to wait for them to exit (in fact, waiting
4543 * for them can cause deadlock).
4545 * This routine is subject to starvation since it does not set any flag to
4546 * prevent threads from entering a module in the stream (i.e. sq_count can
4547 * increase on some syncq while it is waiting on some other syncq).
4549 * Assumes that only one thread attempts to call strlock for a given
4550 * stream. If this is not the case the two threads would deadlock.
4551 * This assumption is guaranteed since strlock is only called by insertq
4552 * and removeq and streams plumbing changes are single-threaded for
4553 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
4555 * For pipes, it is not difficult to atomically designate a pair of streams
4556 * to be mated. Once mated atomically by the framework the twisted pair remain
4557 * configured that way until dismantled atomically by the framework.
4558 * When plumbing takes place on a twisted stream it is necessary to ensure that
4559 * this operation is done exclusively on the twisted stream since two such
4560  * operations, each initiated on different ends of the pipe, will deadlock
4561 * waiting for each other to complete.
4563 * On entry, no locks should be held.
4564  * The locks acquired and held by strlock depend on a few factors.
4565 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
4566  *   and held on exit, and all sq_counts are at an acceptable level.
4567 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
4568 * sd_refcnt being zero.
4571 static void
4572 strlock(struct stdata *stp, sqlist_t *sqlist)
4574 syncql_t *sql, *sql2;
4575 retry:
4577 * Wait for any claimstr to go away.
4579 if (STRMATED(stp)) {
4580 struct stdata *stp1, *stp2;
4582 STRLOCKMATES(stp);
4584 * Note that the selection of locking order is not
4585 * important, just that they are always acquired in
4586 * the same order. To assure this, we choose this
4587 * order based on the value of the pointer, and since
4588 * the pointer will not change for the life of this
4589 * pair, we will always grab the locks in the same
4590 * order (and hence, prevent deadlocks).
4592 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
4593 stp1 = stp;
4594 stp2 = stp->sd_mate;
4595 } else {
4596 stp2 = stp;
4597 stp1 = stp->sd_mate;
4599 mutex_enter(&stp1->sd_reflock);
4600 if (stp1->sd_refcnt > 0) {
4601 STRUNLOCKMATES(stp);
4602 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
4603 mutex_exit(&stp1->sd_reflock);
4604 goto retry;
4606 mutex_enter(&stp2->sd_reflock);
4607 if (stp2->sd_refcnt > 0) {
4608 STRUNLOCKMATES(stp);
4609 mutex_exit(&stp1->sd_reflock);
4610 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
4611 mutex_exit(&stp2->sd_reflock);
4612 goto retry;
4614 STREAM_PUTLOCKS_ENTER(stp1);
4615 STREAM_PUTLOCKS_ENTER(stp2);
4616 } else {
4617 mutex_enter(&stp->sd_lock);
4618 mutex_enter(&stp->sd_reflock);
4619 while (stp->sd_refcnt > 0) {
4620 mutex_exit(&stp->sd_lock);
4621 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
4622 if (mutex_tryenter(&stp->sd_lock) == 0) {
4623 mutex_exit(&stp->sd_reflock);
4624 mutex_enter(&stp->sd_lock);
4625 mutex_enter(&stp->sd_reflock);
4628 STREAM_PUTLOCKS_ENTER(stp);
4631 if (sqlist == NULL)
4632 return;
4634 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4635 syncq_t *sq = sql->sql_sq;
4636 uint16_t count;
4638 mutex_enter(SQLOCK(sq));
4639 count = sq->sq_count;
4640 ASSERT(sq->sq_rmqcount <= count);
4641 SQ_PUTLOCKS_ENTER(sq);
4642 SUM_SQ_PUTCOUNTS(sq, count);
4643 if (count == sq->sq_rmqcount)
4644 continue;
4646 /* Failed - drop all locks that we have acquired so far */
4647 if (STRMATED(stp)) {
4648 STREAM_PUTLOCKS_EXIT(stp);
4649 STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4650 STRUNLOCKMATES(stp);
4651 mutex_exit(&stp->sd_reflock);
4652 mutex_exit(&stp->sd_mate->sd_reflock);
4653 } else {
4654 STREAM_PUTLOCKS_EXIT(stp);
4655 mutex_exit(&stp->sd_lock);
4656 mutex_exit(&stp->sd_reflock);
4658 for (sql2 = sqlist->sqlist_head; sql2 != sql;
4659 sql2 = sql2->sql_next) {
4660 SQ_PUTLOCKS_EXIT(sql2->sql_sq);
4661 mutex_exit(SQLOCK(sql2->sql_sq));
4665 * The wait loop below may starve when there are many threads
4666 * claiming the syncq. This is especially a problem with permod
4667 * syncqs (IP). To lessen the impact of the problem we increment
4668 * sq_needexcl and clear fastbits so that putnexts will slow
4669 * down and call sqenable instead of draining right away.
4671 sq->sq_needexcl++;
4672 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
4673 while (count > sq->sq_rmqcount) {
4674 sq->sq_flags |= SQ_WANTWAKEUP;
4675 SQ_PUTLOCKS_EXIT(sq);
4676 cv_wait(&sq->sq_wait, SQLOCK(sq));
4677 count = sq->sq_count;
4678 SQ_PUTLOCKS_ENTER(sq);
4679 SUM_SQ_PUTCOUNTS(sq, count);
4681 sq->sq_needexcl--;
4682 if (sq->sq_needexcl == 0)
4683 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
4684 SQ_PUTLOCKS_EXIT(sq);
4685 ASSERT(count == sq->sq_rmqcount);
4686 mutex_exit(SQLOCK(sq));
4687 goto retry;
4692 * Drop all the locks that strlock acquired.
4694 static void
4695 strunlock(struct stdata *stp, sqlist_t *sqlist)
4697 syncql_t *sql;
4699 if (STRMATED(stp)) {
4700 STREAM_PUTLOCKS_EXIT(stp);
4701 STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4702 STRUNLOCKMATES(stp);
4703 mutex_exit(&stp->sd_reflock);
4704 mutex_exit(&stp->sd_mate->sd_reflock);
4705 } else {
4706 STREAM_PUTLOCKS_EXIT(stp);
4707 mutex_exit(&stp->sd_lock);
4708 mutex_exit(&stp->sd_reflock);
4711 if (sqlist == NULL)
4712 return;
4714 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4715 SQ_PUTLOCKS_EXIT(sql->sql_sq);
4716 mutex_exit(SQLOCK(sql->sql_sq));
4721  * When the inserted module has a service procedure, we need to check
4722  * whether the next module with a service procedure is in flow control,
4723  * in order to trigger a backenable.
4725 static void
4726 backenable_insertedq(queue_t *q)
4728 qband_t *qbp;
4730 claimstr(q);
4731 if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
4732 if (q->q_next->q_nfsrv->q_flag & QWANTW)
4733 backenable(q, 0);
4735 qbp = q->q_next->q_nfsrv->q_bandp;
4736 for (; qbp != NULL; qbp = qbp->qb_next)
4737 if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
4738 backenable(q, qbp->qb_first->b_band);
4740 releasestr(q);
4744  * Given two read queues, insert the new one (and its write mate) after
 * the other.
4746 * This routine acquires all the necessary locks in order to change
4747 * q_next and related pointer using strlock().
4748 * It depends on the stream head ensuring that there are no concurrent
4749 * insertq or removeq on the same stream. The stream head ensures this
4750 * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
4752 * Note that no syncq locks are held during the q_next change. This is
4753 * applied to all streams since, unlike removeq, there is no problem of stale
4754 * pointers when adding a module to the stream. Thus drivers/modules that do a
4755 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
4756 * applied this optimization to all streams.
4758 void
4759 insertq(struct stdata *stp, queue_t *new)
4761 queue_t *after;
4762 queue_t *wafter;
4763 queue_t *wnew = _WR(new);
4764 boolean_t have_fifo = B_FALSE;
4766 if (new->q_flag & _QINSERTING) {
4767 ASSERT(stp->sd_vnode->v_type != VFIFO);
4768 after = new->q_next;
4769 wafter = _WR(new->q_next);
4770 } else {
4771 after = _RD(stp->sd_wrq);
4772 wafter = stp->sd_wrq;
4775 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
4776 "insertq:%p, %p", after, new);
4777 ASSERT(after->q_flag & QREADR);
4778 ASSERT(new->q_flag & QREADR);
4780 strlock(stp, NULL);
4782 /* Do we have a FIFO? */
4783 if (wafter->q_next == after) {
4784 have_fifo = B_TRUE;
4785 wnew->q_next = new;
4786 } else {
4787 wnew->q_next = wafter->q_next;
4789 new->q_next = after;
4791 set_nfsrv_ptr(new, wnew, after, wafter);
4793 * set_nfsrv_ptr() needs to know if this is an insertion or not,
4794 * so only reset this flag after calling it.
4796 new->q_flag &= ~_QINSERTING;
4798 if (have_fifo) {
4799 wafter->q_next = wnew;
4800 } else {
4801 if (wafter->q_next)
4802 _OTHERQ(wafter->q_next)->q_next = new;
4803 wafter->q_next = wnew;
4806 set_qend(new);
4807 /* The QEND flag might have to be updated for the upstream guy */
4808 set_qend(after);
4810 ASSERT(_SAMESTR(new) == O_SAMESTR(new));
4811 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
4812 ASSERT(_SAMESTR(after) == O_SAMESTR(after));
4813 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
4814 strsetuio(stp);
4817 * If this was a module insertion, bump the push count.
4819 if (!(new->q_flag & QISDRV))
4820 stp->sd_pushcnt++;
4822 strunlock(stp, NULL);
4824 /* check if the write Q needs backenable */
4825 backenable_insertedq(wnew);
4827 /* check if the read Q needs backenable */
4828 backenable_insertedq(new);
4832 * Given a read queue, unlink it from any neighbors.
4834 * This routine acquires all the necessary locks in order to
4835 * change q_next and related pointers and also guard against
4836 * stale references (e.g. through q_next) to the queue that
4837 * is being removed. It also plays part of the role in ensuring
4838 * that the module's/driver's put procedure doesn't get called
4839 * after qprocsoff returns.
4841 * Removeq depends on the stream head ensuring that there are
4842 * no concurrent insertq or removeq on the same stream. The
4843 * stream head ensures this using the flags STWOPEN, STRCLOSE and
4844 * STRPLUMB.
4846 * The set of locks needed to remove the queue is different in
4847 * different cases:
4849 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
4850 * waiting for the syncq reference count to drop to 0 indicating that no
4851 * non-close threads are present anywhere in the stream. This ensures that any
4852 * module/driver can reference q_next in its open, close, put, or service
4853 * procedures.
4855 * The sq_rmqcount counter tracks the number of threads inside removeq().
4856  * strlock() ensures that there are either no threads executing inside the
4857  * perimeter, or only a thread calling qprocsoff().
4859 * strlock() compares the value of sq_count with the number of threads inside
4860  * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake
4861  * up any threads waiting in strlock() when sq_rmqcount increases.
4864 void
4865 removeq(queue_t *qp)
4867 queue_t *wqp = _WR(qp);
4868 struct stdata *stp = STREAM(qp);
4869 sqlist_t *sqlist = NULL;
4870 boolean_t isdriver;
4871 int moved;
4872 syncq_t *sq = qp->q_syncq;
4873 syncq_t *wsq = wqp->q_syncq;
4875 ASSERT(stp);
4877 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
4878 "removeq:%p %p", qp, wqp);
4879 ASSERT(qp->q_flag&QREADR);
4882 * For queues using Synchronous streams, we must wait for all threads in
4883 * rwnext() to drain out before proceeding.
4885 if (qp->q_flag & QSYNCSTR) {
4886          /* First, we need to wake up any threads blocked in rwnext() */
4887 mutex_enter(SQLOCK(sq));
4888 if (sq->sq_flags & SQ_WANTWAKEUP) {
4889 sq->sq_flags &= ~SQ_WANTWAKEUP;
4890 cv_broadcast(&sq->sq_wait);
4892 mutex_exit(SQLOCK(sq));
4894 if (wsq != sq) {
4895 mutex_enter(SQLOCK(wsq));
4896 if (wsq->sq_flags & SQ_WANTWAKEUP) {
4897 wsq->sq_flags &= ~SQ_WANTWAKEUP;
4898 cv_broadcast(&wsq->sq_wait);
4900 mutex_exit(SQLOCK(wsq));
4903 mutex_enter(QLOCK(qp));
4904 while (qp->q_rwcnt > 0) {
4905 qp->q_flag |= QWANTRMQSYNC;
4906 cv_wait(&qp->q_wait, QLOCK(qp));
4908 mutex_exit(QLOCK(qp));
4910 mutex_enter(QLOCK(wqp));
4911 while (wqp->q_rwcnt > 0) {
4912 wqp->q_flag |= QWANTRMQSYNC;
4913 cv_wait(&wqp->q_wait, QLOCK(wqp));
4915 mutex_exit(QLOCK(wqp));
4918 mutex_enter(SQLOCK(sq));
4919 sq->sq_rmqcount++;
4920 if (sq->sq_flags & SQ_WANTWAKEUP) {
4921 sq->sq_flags &= ~SQ_WANTWAKEUP;
4922 cv_broadcast(&sq->sq_wait);
4924 mutex_exit(SQLOCK(sq));
4926 isdriver = (qp->q_flag & QISDRV);
4928 sqlist = sqlist_build(qp, stp, STRMATED(stp));
4929 strlock(stp, sqlist);
4931 reset_nfsrv_ptr(qp, wqp);
4933 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp);
4934 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp);
4935 /* Do we have a FIFO? */
4936 if (wqp->q_next == qp) {
4937 stp->sd_wrq->q_next = _RD(stp->sd_wrq);
4938 } else {
4939 if (wqp->q_next)
4940 backq(qp)->q_next = qp->q_next;
4941 if (qp->q_next)
4942 backq(wqp)->q_next = wqp->q_next;
4945 /* The QEND flag might have to be updated for the upstream guy */
4946 if (qp->q_next)
4947 set_qend(qp->q_next);
4949 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq));
4950 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq)));
4953 * Move any messages destined for the put procedures to the next
4954 * syncq in line. Otherwise free them.
4956 moved = 0;
4958 * Quick check to see whether there are any messages or events.
4960 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS))
4961 moved += propagate_syncq(qp);
4962 if (wqp->q_syncqmsgs != 0 ||
4963 (wqp->q_syncq->sq_flags & SQ_EVENTS))
4964 moved += propagate_syncq(wqp);
4966 strsetuio(stp);
4969 * If this was a module removal, decrement the push count.
4971 if (!isdriver)
4972 stp->sd_pushcnt--;
4974 strunlock(stp, sqlist);
4975 sqlist_free(sqlist);
4978 * Make sure any messages that were propagated are drained.
4979 * Also clear any QFULL bit caused by messages that were propagated.
4982 if (qp->q_next != NULL) {
4983 clr_qfull(qp);
4985          * For the driver calling qprocsoff, propagate_syncq
4986          * frees all the messages instead of putting them in
4987          * the stream head.
4989 if (!isdriver && (moved > 0))
4990 emptysq(qp->q_next->q_syncq);
4992 if (wqp->q_next != NULL) {
4993 clr_qfull(wqp);
4995          * We come here for any pop of a module except for the
4996          * case of the driver being removed. We don't call emptysq
4997          * if we did not move any messages; this avoids holding
4998          * PERMOD syncq locks in emptysq.
5000 if (moved > 0)
5001 emptysq(wqp->q_next->q_syncq);
5004 mutex_enter(SQLOCK(sq));
5005 sq->sq_rmqcount--;
5006 mutex_exit(SQLOCK(sq));
5010 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
5011 * SQ_WRITER) on a syncq.
5012  * If maxcnt is not -1 it assumes that the caller has "maxcnt" claim(s) on the
5013 * sync queue and waits until sq_count reaches maxcnt.
5015 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller
5016 * does not care about putnext threads that are in the middle of calling put
5017 * entry points.
5019 * This routine is used for both inner and outer syncqs.
5021 static void
5022 blocksq(syncq_t *sq, ushort_t flag, int maxcnt)
5024 uint16_t count = 0;
5026 mutex_enter(SQLOCK(sq));
5028 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
5029 * SQ_FROZEN will be set if there is a frozen stream that has a
5030 * queue which also refers to this "shared" syncq.
5031          * SQ_BLOCKED will be set if there is an "off" queue which also
5032 * refers to this "shared" syncq.
5034 if (maxcnt != -1) {
5035 count = sq->sq_count;
5036 SQ_PUTLOCKS_ENTER(sq);
5037 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5038 SUM_SQ_PUTCOUNTS(sq, count);
5040 sq->sq_needexcl++;
5041 ASSERT(sq->sq_needexcl != 0); /* wraparound */
5043 while ((sq->sq_flags & flag) ||
5044 (maxcnt != -1 && count > (unsigned)maxcnt)) {
5045 sq->sq_flags |= SQ_WANTWAKEUP;
5046 if (maxcnt != -1) {
5047 SQ_PUTLOCKS_EXIT(sq);
5049 cv_wait(&sq->sq_wait, SQLOCK(sq));
5050 if (maxcnt != -1) {
5051 count = sq->sq_count;
5052 SQ_PUTLOCKS_ENTER(sq);
5053 SUM_SQ_PUTCOUNTS(sq, count);
5056 sq->sq_needexcl--;
5057 sq->sq_flags |= flag;
5058 ASSERT(maxcnt == -1 || count == maxcnt);
5059 if (maxcnt != -1) {
5060 if (sq->sq_needexcl == 0) {
5061 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5063 SQ_PUTLOCKS_EXIT(sq);
5064 } else if (sq->sq_needexcl == 0) {
5065 SQ_PUTCOUNT_SETFAST(sq);
5068 mutex_exit(SQLOCK(sq));
5072 * Reset a flag that was set with blocksq.
5074 * Can not use this routine to reset SQ_WRITER.
5076 * If "isouter" is set then the syncq is assumed to be an outer perimeter
5077 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread
5078 * to handle the queued qwriter operations.
5080 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5081 * sq_putlocks are used.
5083 static void
5084 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter)
5086 uint16_t flags;
5088 mutex_enter(SQLOCK(sq));
5089 ASSERT(resetflag != SQ_WRITER);
5090 ASSERT(sq->sq_flags & resetflag);
5091 flags = sq->sq_flags & ~resetflag;
5092 sq->sq_flags = flags;
5093 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) {
5094 if (flags & SQ_WANTWAKEUP) {
5095 flags &= ~SQ_WANTWAKEUP;
5096 cv_broadcast(&sq->sq_wait);
5098 sq->sq_flags = flags;
5099 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5100 if (!isouter) {
5101 /* drain_syncq drops SQLOCK */
5102 drain_syncq(sq);
5103 return;
5107 mutex_exit(SQLOCK(sq));
5111 * Reset a flag that was set with blocksq.
5112 * Does not drain the syncq. Use emptysq() for that.
5113 * Returns 1 if SQ_QUEUED is set. Otherwise 0.
5115 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5116 * sq_putlocks are used.
5118 static int
5119 dropsq(syncq_t *sq, uint16_t resetflag)
5121 uint16_t flags;
5123 mutex_enter(SQLOCK(sq));
5124 ASSERT(sq->sq_flags & resetflag);
5125 flags = sq->sq_flags & ~resetflag;
5126 if (flags & SQ_WANTWAKEUP) {
5127 flags &= ~SQ_WANTWAKEUP;
5128 cv_broadcast(&sq->sq_wait);
5130 sq->sq_flags = flags;
5131 mutex_exit(SQLOCK(sq));
5132 if (flags & SQ_QUEUED)
5133 return (1);
5134 return (0);
5138 * Empty all the messages on a syncq.
5140 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5141 * sq_putlocks are used.
5143 static void
5144 emptysq(syncq_t *sq)
5146 uint16_t flags;
5148 mutex_enter(SQLOCK(sq));
5149 flags = sq->sq_flags;
5150 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5152 * To prevent potential recursive invocation of drain_syncq we
5153 * do not call drain_syncq if count is non-zero.
5155 if (sq->sq_count == 0) {
5156 /* drain_syncq() drops SQLOCK */
5157 drain_syncq(sq);
5158 return;
5159 } else
5160 sqenable(sq);
5162 mutex_exit(SQLOCK(sq));
5166 * Ordered insert while removing duplicates.
5168 static void
5169 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp)
5171 syncql_t *sqlp, **prev_sqlpp, *new_sqlp;
5173 prev_sqlpp = &sqlist->sqlist_head;
5174 while ((sqlp = *prev_sqlpp) != NULL) {
5175 if (sqlp->sql_sq >= sqp) {
5176 if (sqlp->sql_sq == sqp) /* duplicate */
5177 return;
5178 break;
5180 prev_sqlpp = &sqlp->sql_next;
5182 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++];
5183 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size);
5184 new_sqlp->sql_next = sqlp;
5185 new_sqlp->sql_sq = sqp;
5186 *prev_sqlpp = new_sqlp;
5190 * Walk the write side queues until we hit either the driver
5191 * or a twist in the stream (_SAMESTR will return false in both
5192 * these cases) then turn around and walk the read side queues
5193 * back up to the stream head.
5195 static void
5196 sqlist_insertall(sqlist_t *sqlist, queue_t *q)
5198 while (q != NULL) {
5199 sqlist_insert(sqlist, q->q_syncq);
5201 if (_SAMESTR(q))
5202 q = q->q_next;
5203 else if (!(q->q_flag & QREADR))
5204 q = _RD(q);
5205 else
5206 q = NULL;
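/*
 * Traversal-order sketch (illustrative): for a stream with one pushed
 * module, starting from the stream head write queue the walk visits
 *
 *	head-wq -> mod-wq -> drv-wq		(down the write side)
 *	drv-rq -> mod-rq -> head-rq		(turn around, up the read side)
 *
 * inserting each queue's syncq along the way; duplicates (e.g. a
 * QPERMOD pair sharing one syncq) are discarded by sqlist_insert().
 */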
5211 * Allocate and build a list of all syncqs in a stream and the syncq(s)
5212 * associated with the "q" parameter. The resulting list is sorted in a
5213 * canonical order and is free of duplicates.
5214 * Assumes the passed queue is a _RD(q).
5216 static sqlist_t *
5217 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist)
5219 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP);
5222 * start with the current queue/qpair
5224 ASSERT(q->q_flag & QREADR);
5226 sqlist_insert(sqlist, q->q_syncq);
5227 sqlist_insert(sqlist, _WR(q)->q_syncq);
5229 sqlist_insertall(sqlist, stp->sd_wrq);
5230 if (do_twist)
5231 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq);
5233 return (sqlist);
5236 static sqlist_t *
5237 sqlist_alloc(struct stdata *stp, int kmflag)
5239 size_t sqlist_size;
5240 sqlist_t *sqlist;
5243 * Allocate 2 syncql_t's for each pushed module. Note that
5244 * the sqlist_t structure already has 4 syncql_t's built in:
5245 * 2 for the stream head, and 2 for the driver/other stream head.
5247 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt +
5248 sizeof (sqlist_t);
5249 if (STRMATED(stp))
5250 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt;
5251 sqlist = kmem_alloc(sqlist_size, kmflag);
5253 sqlist->sqlist_head = NULL;
5254 sqlist->sqlist_size = sqlist_size;
5255 sqlist->sqlist_index = 0;
5257 return (sqlist);
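/*
 * Worked example (illustrative): for an unmated stream with three
 * pushed modules,
 *
 *	sqlist_size = 2 * sizeof (syncql_t) * 3 + sizeof (sqlist_t)
 *
 * and since sizeof (sqlist_t) already embeds 4 syncql_t's, the list can
 * record up to 10 syncqs.
 */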
5261 * Free the list created by sqlist_alloc()
5263 static void
5264 sqlist_free(sqlist_t *sqlist)
5266 kmem_free(sqlist, sqlist->sqlist_size);
5270 * Prevent any new entries into any syncq in this stream.
5271 * Used by freezestr.
5273 void
5274 strblock(queue_t *q)
5276 struct stdata *stp;
5277 syncql_t *sql;
5278 sqlist_t *sqlist;
5280 q = _RD(q);
5282 stp = STREAM(q);
5283 ASSERT(stp != NULL);
5286 * Get a sorted list with all the duplicates removed containing
5287 * all the syncqs referenced by this stream.
5289 sqlist = sqlist_build(q, stp, B_FALSE);
5290 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5291 blocksq(sql->sql_sq, SQ_FROZEN, -1);
5292 sqlist_free(sqlist);
5296 * Release the block on new entries into this stream
5298 void
5299 strunblock(queue_t *q)
5301 struct stdata *stp;
5302 syncql_t *sql;
5303 sqlist_t *sqlist;
5304 int drain_needed;
5306 q = _RD(q);
5309 * Get a sorted list with all the duplicates removed containing
5310 * all the syncqs referenced by this stream.
5311 * Have to drop the SQ_FROZEN flag on all the syncqs before
5312 * starting to drain them; otherwise the draining might
5313 * cause a freezestr in some module on the stream (which
5314 * would deadlock).
5316 stp = STREAM(q);
5317 ASSERT(stp != NULL);
5318 sqlist = sqlist_build(q, stp, B_FALSE);
5319 drain_needed = 0;
5320 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5321 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
5322 if (drain_needed) {
5323 for (sql = sqlist->sqlist_head; sql != NULL;
5324 sql = sql->sql_next)
5325 emptysq(sql->sql_sq);
5327 sqlist_free(sqlist);
5330 #ifdef DEBUG
5331 static int
5332 qprocsareon(queue_t *rq)
5334 if (rq->q_next == NULL)
5335 return (0);
5336 return (_WR(rq->q_next)->q_next == _WR(rq));
5340 qclaimed(queue_t *q)
5342 uint_t count;
5344 count = q->q_syncq->sq_count;
5345 SUM_SQ_PUTCOUNTS(q->q_syncq, count);
5346 return (count != 0);
5350 * Check if anyone has frozen this stream with freezestr
5353 frozenstr(queue_t *q)
5355 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
5357 #endif /* DEBUG */
5360 * Enter a queue.
5361 * Obsoleted interface. Should not be used.
5363 void
5364 enterq(queue_t *q)
5366 entersq(q->q_syncq, SQ_CALLBACK);
5369 void
5370 leaveq(queue_t *q)
5372 leavesq(q->q_syncq, SQ_CALLBACK);
5376 * Enter a perimeter. c_inner and c_outer specifies which concurrency bits
5377 * to check.
5378 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
5379 * calls and the running of open, close and service procedures.
5381 * If c_inner bit is set no need to grab sq_putlocks since we don't care
5382 * if other threads have entered or are entering put entry point.
5384 * If c_inner bit is set it might have been possible to use
5385 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
5386 * open/close path for IP) but since the count may need to be decremented in
5387  * qwait() we wouldn't know which counter to decrement. Currently the counter
5388  * is selected by the current cpu_seqid, which can change at any moment. XXX
5389 * in the future we might use curthread id bits to select the counter and this
5390 * would stay constant across routine calls.
5392 void
5393 entersq(syncq_t *sq, int entrypoint)
5395 uint16_t count = 0;
5396 uint16_t flags;
5397 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
5398 uint16_t type;
5399 uint_t c_inner = entrypoint & SQ_CI;
5400 uint_t c_outer = entrypoint & SQ_CO;
5403 * Increment ref count to keep closes out of this queue.
5405 ASSERT(sq);
5406 ASSERT(c_inner && c_outer);
5407 mutex_enter(SQLOCK(sq));
5408 flags = sq->sq_flags;
5409 type = sq->sq_type;
5410 if (!(type & c_inner)) {
5411 /* Make sure all putcounts now use slowlock. */
5412 count = sq->sq_count;
5413 SQ_PUTLOCKS_ENTER(sq);
5414 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5415 SUM_SQ_PUTCOUNTS(sq, count);
5416 sq->sq_needexcl++;
5417 ASSERT(sq->sq_needexcl != 0); /* wraparound */
5418 waitflags |= SQ_MESSAGES;
5421 * Wait until we can enter the inner perimeter.
5422 * If we want exclusive access we wait until sq_count is 0.
5423 * We have to do this before entering the outer perimeter in order
5424 * to preserve put/close message ordering.
5426 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) {
5427 sq->sq_flags = flags | SQ_WANTWAKEUP;
5428 if (!(type & c_inner)) {
5429 SQ_PUTLOCKS_EXIT(sq);
5431 cv_wait(&sq->sq_wait, SQLOCK(sq));
5432 if (!(type & c_inner)) {
5433 count = sq->sq_count;
5434 SQ_PUTLOCKS_ENTER(sq);
5435 SUM_SQ_PUTCOUNTS(sq, count);
5437 flags = sq->sq_flags;
5440 if (!(type & c_inner)) {
5441 ASSERT(sq->sq_needexcl > 0);
5442 sq->sq_needexcl--;
5443 if (sq->sq_needexcl == 0) {
5444 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5448 /* Check if we need to enter the outer perimeter */
5449 if (!(type & c_outer)) {
5451 * We have to enter the outer perimeter exclusively before
5452 * we can increment sq_count to avoid deadlock. This implies
5453 * that we have to re-check sq_flags and sq_count.
5455          * XXX is it possible to have c_inner set when c_outer is not set?
5457 if (!(type & c_inner)) {
5458 SQ_PUTLOCKS_EXIT(sq);
5460 mutex_exit(SQLOCK(sq));
5461 outer_enter(sq->sq_outer, SQ_GOAWAY);
5462 mutex_enter(SQLOCK(sq));
5463 flags = sq->sq_flags;
5465 * there should be no need to recheck sq_putcounts
5466 * because outer_enter() has already waited for them to clear
5467 * after setting SQ_WRITER.
5469 count = sq->sq_count;
5470 #ifdef DEBUG
5472 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead
5473 * of doing an ASSERT internally. Others should do
5474 * something like
5475 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
5476 * without the need to #ifdef DEBUG it.
5478 SUMCHECK_SQ_PUTCOUNTS(sq, 0);
5479 #endif
5480 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) ||
5481 (!(type & c_inner) && count != 0)) {
5482 sq->sq_flags = flags | SQ_WANTWAKEUP;
5483 cv_wait(&sq->sq_wait, SQLOCK(sq));
5484 count = sq->sq_count;
5485 flags = sq->sq_flags;
5489 sq->sq_count++;
5490 ASSERT(sq->sq_count != 0); /* Wraparound */
5491 if (!(type & c_inner)) {
5492 /* Exclusive entry */
5493 ASSERT(sq->sq_count == 1);
5494 sq->sq_flags |= SQ_EXCL;
5495 if (type & c_outer) {
5496 SQ_PUTLOCKS_EXIT(sq);
5499 mutex_exit(SQLOCK(sq));
5503  * Leave a syncq. Announce to the framework that closes may proceed.
5504 * c_inner and c_outer specify which concurrency bits to check.
5506 * Must never be called from driver or module put entry point.
5508 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5509 * sq_putlocks are used.
5511 void
5512 leavesq(syncq_t *sq, int entrypoint)
5514 uint16_t flags;
5515 uint16_t type;
5516 uint_t c_outer = entrypoint & SQ_CO;
5517 #ifdef DEBUG
5518 uint_t c_inner = entrypoint & SQ_CI;
5519 #endif
5522 * Decrement ref count, drain the syncq if possible, and wake up
5523 * any waiting close.
5525 ASSERT(sq);
5526 ASSERT(c_inner && c_outer);
5527 mutex_enter(SQLOCK(sq));
5528 flags = sq->sq_flags;
5529 type = sq->sq_type;
5530 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) {
5532 if (flags & SQ_WANTWAKEUP) {
5533 flags &= ~SQ_WANTWAKEUP;
5534 cv_broadcast(&sq->sq_wait);
5536 if (flags & SQ_WANTEXWAKEUP) {
5537 flags &= ~SQ_WANTEXWAKEUP;
5538 cv_broadcast(&sq->sq_exitwait);
5541 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
5543 * The syncq needs to be drained. "Exit" the syncq
5544 * before calling drain_syncq.
5546 ASSERT(sq->sq_count != 0);
5547 sq->sq_count--;
5548 ASSERT((flags & SQ_EXCL) || (type & c_inner));
5549 sq->sq_flags = flags & ~SQ_EXCL;
5550 drain_syncq(sq);
5551 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
5552 /* Check if we need to exit the outer perimeter */
5553 /* XXX will this ever be true? */
5554 if (!(type & c_outer))
5555 outer_exit(sq->sq_outer);
5556 return;
5559 ASSERT(sq->sq_count != 0);
5560 sq->sq_count--;
5561 ASSERT((flags & SQ_EXCL) || (type & c_inner));
5562 sq->sq_flags = flags & ~SQ_EXCL;
5563 mutex_exit(SQLOCK(sq));
5565 /* Check if we need to exit the outer perimeter */
5566 if (!(sq->sq_type & c_outer))
5567 outer_exit(sq->sq_outer);
5571 * Prevent q_next from changing in this stream by incrementing sq_count.
5573 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5574 * sq_putlocks are used.
5576 void
5577 claimq(queue_t *qp)
5579 syncq_t *sq = qp->q_syncq;
5581 mutex_enter(SQLOCK(sq));
5582 sq->sq_count++;
5583 ASSERT(sq->sq_count != 0); /* Wraparound */
5584 mutex_exit(SQLOCK(sq));
5588 * Undo claimq.
5590 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5591 * sq_putlocks are used.
5593 void
5594 releaseq(queue_t *qp)
5596 syncq_t *sq = qp->q_syncq;
5597 uint16_t flags;
5599 mutex_enter(SQLOCK(sq));
5600 ASSERT(sq->sq_count > 0);
5601 sq->sq_count--;
5603 flags = sq->sq_flags;
5604 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) {
5605 if (flags & SQ_WANTWAKEUP) {
5606 flags &= ~SQ_WANTWAKEUP;
5607 cv_broadcast(&sq->sq_wait);
5609 sq->sq_flags = flags;
5610 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5612 * To prevent potential recursive invocation of
5613 * drain_syncq we do not call drain_syncq if count is
5614 * non-zero.
5616 if (sq->sq_count == 0) {
5617 drain_syncq(sq);
5618 return;
5619 } else
5620 sqenable(sq);
5623 mutex_exit(SQLOCK(sq));
5627 * Prevent q_next from changing in this stream by incrementing sd_refcnt.
5629 void
5630 claimstr(queue_t *qp)
5632 struct stdata *stp = STREAM(qp);
5634 mutex_enter(&stp->sd_reflock);
5635 stp->sd_refcnt++;
5636 ASSERT(stp->sd_refcnt != 0); /* Wraparound */
5637 mutex_exit(&stp->sd_reflock);
5641 * Undo claimstr.
5643 void
5644 releasestr(queue_t *qp)
5646 struct stdata *stp = STREAM(qp);
5648 mutex_enter(&stp->sd_reflock);
5649 ASSERT(stp->sd_refcnt != 0);
5650 if (--stp->sd_refcnt == 0)
5651 cv_broadcast(&stp->sd_refmonitor);
5652 mutex_exit(&stp->sd_reflock);
5655 static syncq_t *
5656 new_syncq(void)
5658 return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
5661 static void
5662 free_syncq(syncq_t *sq)
5664 ASSERT(sq->sq_head == NULL);
5665 ASSERT(sq->sq_outer == NULL);
5666 ASSERT(sq->sq_callbpend == NULL);
5667 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
5668 (sq->sq_onext == sq && sq->sq_oprev == sq));
5670 if (sq->sq_ciputctrl != NULL) {
5671 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
5672 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
5673 sq->sq_nciputctrl, 0);
5674 ASSERT(ciputctrl_cache != NULL);
5675 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
5678 sq->sq_tail = NULL;
5679 sq->sq_evhead = NULL;
5680 sq->sq_evtail = NULL;
5681 sq->sq_ciputctrl = NULL;
5682 sq->sq_nciputctrl = 0;
5683 sq->sq_count = 0;
5684 sq->sq_rmqcount = 0;
5685 sq->sq_callbflags = 0;
5686 sq->sq_cancelid = 0;
5687 sq->sq_next = NULL;
5688 sq->sq_needexcl = 0;
5689 sq->sq_svcflags = 0;
5690 sq->sq_nqueues = 0;
5691 sq->sq_pri = 0;
5692 sq->sq_onext = NULL;
5693 sq->sq_oprev = NULL;
5694 sq->sq_flags = 0;
5695 sq->sq_type = 0;
5696 sq->sq_servcount = 0;
5698 kmem_cache_free(syncq_cache, sq);
5701 /* Outer perimeter code */
5704 * The outer syncq uses the fields and flags in the syncq slightly
5705 * differently from the inner syncqs.
5706 * sq_count Incremented when there are pending or running
5707 * writers at the outer perimeter to prevent the set of
5708 * inner syncqs that belong to the outer perimeter from
5709 * changing.
5710 * sq_head/tail List of deferred qwriter(OUTER) operations.
5712 * SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while
5713 * inner syncqs are added to or removed from the
5714 * outer perimeter.
5715 * SQ_QUEUED sq_head/tail has messages or events queued.
5717 * SQ_WRITER A thread is currently traversing all the inner syncqs
5718 * setting the SQ_WRITER flag.
5722 * Get write access at the outer perimeter.
5723 * Note that read access is done by entersq, putnext, and put by simply
5724 * incrementing sq_count in the inner syncq.
5726 * Waits until "flags" is no longer set in the outer to prevent multiple
5727 * threads from having write access at the same time. SQ_WRITER has to be part
5728 * of "flags".
5730 * Increases sq_count on the outer syncq to keep away outer_insert/remove
5731 * until the outer_exit is finished.
5733 * outer_enter is vulnerable to starvation since it does not prevent new
5734 * threads from entering the inner syncqs while it is waiting for sq_count to
5735 * go to zero.
5737 void
5738 outer_enter(syncq_t *outer, uint16_t flags)
5740 syncq_t *sq;
5741 int wait_needed;
5742 uint16_t count;
5744 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5745 outer->sq_oprev != NULL);
5746 ASSERT(flags & SQ_WRITER);
5748 retry:
5749 mutex_enter(SQLOCK(outer));
5750 while (outer->sq_flags & flags) {
5751 outer->sq_flags |= SQ_WANTWAKEUP;
5752 cv_wait(&outer->sq_wait, SQLOCK(outer));
5755 ASSERT(!(outer->sq_flags & SQ_WRITER));
5756 outer->sq_flags |= SQ_WRITER;
5757 outer->sq_count++;
5758 ASSERT(outer->sq_count != 0); /* wraparound */
5759 wait_needed = 0;
5761 * Set SQ_WRITER on all the inner syncqs while holding
5762 * the SQLOCK on the outer syncq. This ensures that the changing
5763 * of SQ_WRITER is atomic under the outer SQLOCK.
5765 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5766 mutex_enter(SQLOCK(sq));
5767 count = sq->sq_count;
5768 SQ_PUTLOCKS_ENTER(sq);
5769 sq->sq_flags |= SQ_WRITER;
5770 SUM_SQ_PUTCOUNTS(sq, count);
5771 if (count != 0)
5772 wait_needed = 1;
5773 SQ_PUTLOCKS_EXIT(sq);
5774 mutex_exit(SQLOCK(sq));
5776 mutex_exit(SQLOCK(outer));
5779 * Get everybody out of the syncqs sequentially.
5780 * Note that we don't actually need to acquire the PUTLOCKS, since
5781          * we have already cleared the fastbit, and set SQ_WRITER. By
5782 * definition, the count can not increase since putnext will
5783 * take the slowlock path (and the purpose of acquiring the
5784 * putlocks was to make sure it didn't increase while we were
5785 * waiting).
5787 * Note that we still acquire the PUTLOCKS to be safe.
5789 if (wait_needed) {
5790 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5791 mutex_enter(SQLOCK(sq));
5792 count = sq->sq_count;
5793 SQ_PUTLOCKS_ENTER(sq);
5794 SUM_SQ_PUTCOUNTS(sq, count);
5795 while (count != 0) {
5796 sq->sq_flags |= SQ_WANTWAKEUP;
5797 SQ_PUTLOCKS_EXIT(sq);
5798 cv_wait(&sq->sq_wait, SQLOCK(sq));
5799 count = sq->sq_count;
5800 SQ_PUTLOCKS_ENTER(sq);
5801 SUM_SQ_PUTCOUNTS(sq, count);
5803 SQ_PUTLOCKS_EXIT(sq);
5804 mutex_exit(SQLOCK(sq));
5807 * Verify that none of the flags got set while we
5808 * were waiting for the sq_counts to drop.
5809 * If this happens we exit and retry entering the
5810 * outer perimeter.
5812 mutex_enter(SQLOCK(outer));
5813 if (outer->sq_flags & (flags & ~SQ_WRITER)) {
5814 mutex_exit(SQLOCK(outer));
5815 outer_exit(outer);
5816 goto retry;
5818 mutex_exit(SQLOCK(outer));
5823 * Drop the write access at the outer perimeter.
5824 * Read access is dropped implicitly (by putnext, put, and leavesq) by
5825 * decrementing sq_count.
5827 void
5828 outer_exit(syncq_t *outer)
5830 syncq_t *sq;
5831 int drain_needed;
5832 uint16_t flags;
5834 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5835 outer->sq_oprev != NULL);
5836 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));
5839 * Atomically (from the perspective of threads calling become_writer)
5840 * drop the write access at the outer perimeter by holding
5841 * SQLOCK(outer) across all the dropsq calls and the resetting of
5842 * SQ_WRITER.
5843 * This defines a locking order between the outer perimeter
5844 * SQLOCK and the inner perimeter SQLOCKs.
5846 mutex_enter(SQLOCK(outer));
5847 flags = outer->sq_flags;
5848 ASSERT(outer->sq_flags & SQ_WRITER);
5849 if (flags & SQ_QUEUED) {
5850 write_now(outer);
5851 flags = outer->sq_flags;
5855 * sq_onext is stable since sq_count has not yet been decreased.
5856 * Reset the SQ_WRITER flags in all syncqs.
5857 * After dropping SQ_WRITER on the outer syncq we empty all the
5858 * inner syncqs.
5860 drain_needed = 0;
5861 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5862 drain_needed += dropsq(sq, SQ_WRITER);
5863 ASSERT(!(outer->sq_flags & SQ_QUEUED));
5864 flags &= ~SQ_WRITER;
5865 if (drain_needed) {
5866 outer->sq_flags = flags;
5867 mutex_exit(SQLOCK(outer));
5868 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5869 emptysq(sq);
5870 mutex_enter(SQLOCK(outer));
5871 flags = outer->sq_flags;
5873 if (flags & SQ_WANTWAKEUP) {
5874 flags &= ~SQ_WANTWAKEUP;
5875 cv_broadcast(&outer->sq_wait);
5877 outer->sq_flags = flags;
5878 ASSERT(outer->sq_count > 0);
5879 outer->sq_count--;
5880 mutex_exit(SQLOCK(outer));
5884 * Add another syncq to an outer perimeter.
5885 * Block out all other access to the outer perimeter while it is being
5886 * changed using blocksq.
5887 * Assumes that the caller has *not* done an outer_enter.
5889 * Vulnerable to starvation in blocksq.
5891 static void
5892 outer_insert(syncq_t *outer, syncq_t *sq)
5894 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5895 outer->sq_oprev != NULL);
5896 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
5897 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */
5899 /* Get exclusive access to the outer perimeter list */
5900 blocksq(outer, SQ_BLOCKED, 0);
5901 ASSERT(outer->sq_flags & SQ_BLOCKED);
5902 ASSERT(!(outer->sq_flags & SQ_WRITER));
5904 mutex_enter(SQLOCK(sq));
5905 sq->sq_outer = outer;
5906 outer->sq_onext->sq_oprev = sq;
5907 sq->sq_onext = outer->sq_onext;
5908 outer->sq_onext = sq;
5909 sq->sq_oprev = outer;
5910 mutex_exit(SQLOCK(sq));
5911 unblocksq(outer, SQ_BLOCKED, 1);
5915 * Remove a syncq from an outer perimeter.
5916 * Block out all other access to the outer perimeter while it is being
5917 * changed using blocksq.
5918 * Assumes that the caller has *not* done an outer_enter.
5920 * Vulnerable to starvation in blocksq.
5922 static void
5923 outer_remove(syncq_t *outer, syncq_t *sq)
5925 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5926 outer->sq_oprev != NULL);
5927 ASSERT(sq->sq_outer == outer);
5929 /* Get exclusive access to the outer perimeter list */
5930 blocksq(outer, SQ_BLOCKED, 0);
5931 ASSERT(outer->sq_flags & SQ_BLOCKED);
5932 ASSERT(!(outer->sq_flags & SQ_WRITER));
5934 mutex_enter(SQLOCK(sq));
5935 sq->sq_outer = NULL;
5936 sq->sq_onext->sq_oprev = sq->sq_oprev;
5937 sq->sq_oprev->sq_onext = sq->sq_onext;
5938 sq->sq_oprev = sq->sq_onext = NULL;
5939 mutex_exit(SQLOCK(sq));
5940 unblocksq(outer, SQ_BLOCKED, 1);
5944 * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
5945 * If this is the first callback for this outer perimeter then add
5946 * this outer perimeter to the list of outer perimeters that
5947 * the qwriter_outer_thread will process.
5949 * Increments sq_count in the outer syncq to prevent the membership
5950  * of the outer perimeter (in terms of inner syncqs) from changing while
5951 * the callback is pending.
5953 static void
5954 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5956 ASSERT(MUTEX_HELD(SQLOCK(outer)));
5958 mp->b_prev = (mblk_t *)func;
5959 mp->b_queue = q;
5960 mp->b_next = NULL;
5961 outer->sq_count++; /* Decremented when dequeued */
5962 ASSERT(outer->sq_count != 0); /* Wraparound */
5963 if (outer->sq_evhead == NULL) {
5964 /* First message. */
5965 outer->sq_evhead = outer->sq_evtail = mp;
5966 outer->sq_flags |= SQ_EVENTS;
5967 mutex_exit(SQLOCK(outer));
5968 STRSTAT(qwr_outer);
5969 (void) taskq_dispatch(streams_taskq,
5970 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5971 } else {
5972 ASSERT(outer->sq_flags & SQ_EVENTS);
5973 outer->sq_evtail->b_next = mp;
5974 outer->sq_evtail = mp;
5975 mutex_exit(SQLOCK(outer));
5980  * Try to upgrade to write access at the outer perimeter. If this can
5981 * not be done without blocking then queue the callback to be done
5982 * by the qwriter_outer_thread.
5984 * This routine can only be called from put or service procedures plus
5985 * asynchronous callback routines that have properly entered the queue (with
5986 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq
5987 * associated with q.
5989 void
5990 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
5992 syncq_t *osq, *sq, *outer;
5993 int failed;
5994 uint16_t flags;
5996 osq = q->q_syncq;
5997 outer = osq->sq_outer;
5998 if (outer == NULL)
5999 panic("qwriter(PERIM_OUTER): no outer perimeter");
6000 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6001 outer->sq_oprev != NULL);
6003 mutex_enter(SQLOCK(outer));
6004 flags = outer->sq_flags;
6006 * If some thread is traversing sq_next, or if we are blocked by
6007          * outer_insert or outer_remove, or if we already have queued
6008 * callbacks, then queue this callback for later processing.
6010          * Also queue the qwriter for an interrupt thread in order
6011          * to reduce the time spent running at high IPL.
6014 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
6016 * Queue the become_writer request.
6017 * The queueing is atomic under SQLOCK(outer) in order
6018 * to synchronize with outer_exit.
6019 * queue_writer will drop the outer SQLOCK
6021 if (flags & SQ_BLOCKED) {
6022 /* Must set SQ_WRITER on inner perimeter */
6023 mutex_enter(SQLOCK(osq));
6024 osq->sq_flags |= SQ_WRITER;
6025 mutex_exit(SQLOCK(osq));
6026 } else {
6027 if (!(flags & SQ_WRITER)) {
6029 * The outer could have been SQ_BLOCKED thus
6030 * SQ_WRITER might not be set on the inner.
6032 mutex_enter(SQLOCK(osq));
6033 osq->sq_flags |= SQ_WRITER;
6034 mutex_exit(SQLOCK(osq));
6036 ASSERT(osq->sq_flags & SQ_WRITER);
6038 queue_writer(outer, func, q, mp);
6039 return;
6042 * We are half-way to exclusive access to the outer perimeter.
6043 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
6044 * while the inner syncqs are traversed.
6046 outer->sq_count++;
6047 ASSERT(outer->sq_count != 0); /* wraparound */
6048 flags |= SQ_WRITER;
6050 * Check if we can run the function immediately. Mark all
6051 * syncqs with the writer flag to prevent new entries into
6052 * put and service procedures.
6054 * Set SQ_WRITER on all the inner syncqs while holding
6055 * the SQLOCK on the outer syncq. This ensures that the changing
6056 * of SQ_WRITER is atomic under the outer SQLOCK.
6058 failed = 0;
6059 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6060 uint16_t count;
6061 uint_t maxcnt = (sq == osq) ? 1 : 0;
6063 mutex_enter(SQLOCK(sq));
6064 count = sq->sq_count;
6065 SQ_PUTLOCKS_ENTER(sq);
6066 SUM_SQ_PUTCOUNTS(sq, count);
6067 if (sq->sq_count > maxcnt)
6068 failed = 1;
6069 sq->sq_flags |= SQ_WRITER;
6070 SQ_PUTLOCKS_EXIT(sq);
6071 mutex_exit(SQLOCK(sq));
6073 if (failed) {
6075 * Some other thread has a read claim on the outer perimeter.
6076 * Queue the callback for deferred processing.
6078 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6079 * so that other qwriter(OUTER) calls will queue their
6080 * callbacks as well. queue_writer increments sq_count so we
6081          * decrement to compensate for our increment.
6083 * Dropping SQ_WRITER enables the writer thread to work
6084 * on this outer perimeter.
6086 outer->sq_flags = flags;
6087 queue_writer(outer, func, q, mp);
6088          /* queue_writer dropped the lock */
6089 mutex_enter(SQLOCK(outer));
6090 ASSERT(outer->sq_count > 0);
6091 outer->sq_count--;
6092 ASSERT(outer->sq_flags & SQ_WRITER);
6093 flags = outer->sq_flags;
6094 flags &= ~SQ_WRITER;
6095 if (flags & SQ_WANTWAKEUP) {
6096 flags &= ~SQ_WANTWAKEUP;
6097 cv_broadcast(&outer->sq_wait);
6099 outer->sq_flags = flags;
6100 mutex_exit(SQLOCK(outer));
6101 return;
6102 } else {
6103 outer->sq_flags = flags;
6104 mutex_exit(SQLOCK(outer));
6107 /* Can run it immediately */
6108 (*func)(q, mp);
6110 outer_exit(outer);
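/*
 * Usage sketch (illustrative only; the callback, put procedure, and
 * predicate names are hypothetical): a module upgrades to exclusive
 * outer access through the qwriter(9F) wrapper, which invokes
 * qwriter_outer() for PERIM_OUTER:
 */
#if 0
static void
example_outer_cb(queue_t *q, mblk_t *mp)
{
	/* Runs with exclusive (writer) access to the outer perimeter. */
	example_reconfigure(q, mp);
	freemsg(mp);
}

static int
exampleput(queue_t *q, mblk_t *mp)
{
	if (example_needs_exclusive(mp)) {
		/* Ownership of mp passes to the callback. */
		qwriter(q, mp, example_outer_cb, PERIM_OUTER);
		return (0);
	}
	putnext(q, mp);
	return (0);
}
#endif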
6114 * Dequeue all writer callbacks from the outer perimeter and run them.
6116 static void
6117 write_now(syncq_t *outer)
6119 mblk_t *mp;
6120 queue_t *q;
6121 void (*func)();
6123 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6124 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6125 outer->sq_oprev != NULL);
6126 while ((mp = outer->sq_evhead) != NULL) {
6128 * queues cannot be placed on the queuelist on the outer
6129 * perimeter.
6131 ASSERT(!(outer->sq_flags & SQ_MESSAGES));
6132 ASSERT((outer->sq_flags & SQ_EVENTS));
6134 outer->sq_evhead = mp->b_next;
6135 if (outer->sq_evhead == NULL) {
6136 outer->sq_evtail = NULL;
6137 outer->sq_flags &= ~SQ_EVENTS;
6139 ASSERT(outer->sq_count != 0);
6140 outer->sq_count--; /* Incremented when enqueued. */
6141 mutex_exit(SQLOCK(outer));
6143 * Drop the message if the queue is closing.
6144 * Make sure that the queue is "claimed" when the callback
6145 * is run in order to satisfy various ASSERTs.
6147 q = mp->b_queue;
6148 func = (void (*)())mp->b_prev;
6149 ASSERT(func != NULL);
6150 mp->b_next = mp->b_prev = NULL;
6151 if (q->q_flag & QWCLOSE) {
6152 freemsg(mp);
6153 } else {
6154 claimq(q);
6155 (*func)(q, mp);
6156 releaseq(q);
6158 mutex_enter(SQLOCK(outer));
6160 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6164 * The list of messages on the inner syncq is effectively hashed
6165 * by destination queue. These destination queues are doubly
6166 * linked lists (hopefully) in priority order. Messages are then
6167 * put on the queue referenced by the q_sqhead/q_sqtail elements.
6168 * Additional messages are linked together by the b_next/b_prev
6169 * elements in the mblk, with (similar to putq()) the first message
6170 * having a NULL b_prev and the last message having a NULL b_next.
6172 * Events, such as qwriter callbacks, are put onto a list in FIFO
6173 * order referenced by sq_evhead, and sq_evtail. This is a singly
6174 * linked list, and messages here MUST be processed in the order queued.
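/*
 * Illustrative sketch (user-space model, not kernel code) of the layout
 * described above: per-queue message lists hung off q_sqhead/q_sqtail,
 * with NULL-terminated b_next/b_prev links, plus the FIFO event list on
 * sq_evhead/sq_evtail. All *_model names are hypothetical stand-ins.
 */
#include <stddef.h>

typedef struct mblk_model {
	struct mblk_model *b_next;	/* NULL on the last message */
	struct mblk_model *b_prev;	/* NULL on the first message */
} mblk_model_t;

typedef struct queue_model {
	mblk_model_t *q_sqhead;		/* first message for this queue */
	mblk_model_t *q_sqtail;		/* last message for this queue */
} queue_model_t;

typedef struct syncq_model {
	mblk_model_t *sq_evhead;	/* events run strictly FIFO */
	mblk_model_t *sq_evtail;
} syncq_model_t;

/* Append a message to its destination queue's list (cf. SQPUT_MP). */
static void
msg_append_model(queue_model_t *q, mblk_model_t *mp)
{
	mp->b_next = NULL;
	mp->b_prev = q->q_sqtail;
	if (q->q_sqhead == NULL)
		q->q_sqhead = mp;		/* list was empty */
	else
		q->q_sqtail->b_next = mp;
	q->q_sqtail = mp;
}

/* Take the oldest event off the event list (cf. sq_run_events). */
static mblk_model_t *
event_dequeue_model(syncq_model_t *sq)
{
	mblk_model_t *bp = sq->sq_evhead;

	if (bp != NULL) {
		sq->sq_evhead = bp->b_next;
		if (sq->sq_evhead == NULL)
			sq->sq_evtail = NULL;	/* deleted the last one */
		bp->b_next = bp->b_prev = NULL;
	}
	return (bp);
}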
6178 * Run the events on the syncq event list (sq_evhead).
6179 * Assumes there is only one claim on the syncq, it is
6180 * already exclusive (SQ_EXCL set), and the SQLOCK held.
6181 * Messages here are processed in order, with the SQ_EXCL bit
6182 * held all the way through till the last message is processed.
6184 void
6185 sq_run_events(syncq_t *sq)
6187 mblk_t *bp;
6188 queue_t *qp;
6189 uint16_t flags = sq->sq_flags;
6190 void (*func)();
6192 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6193 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6194 sq->sq_oprev == NULL) ||
6195 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6196 sq->sq_oprev != NULL));
6198 ASSERT(flags & SQ_EXCL);
6199 ASSERT(sq->sq_count == 1);
6202 * We need to process all of the events on this list. It
6203 * is possible that new events will be added while we are
6204 * away processing a callback, so on every loop, we start
6205 * back at the beginning of the list.
6208 * We have to reaccess sq_evhead since there is a
6209 * possibility of a new entry while we were running
6210 * the callback.
6212 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) {
6213 ASSERT(bp->b_queue->q_syncq == sq);
6214 ASSERT(sq->sq_flags & SQ_EVENTS);
6216 qp = bp->b_queue;
6217 func = (void (*)())bp->b_prev;
6218 ASSERT(func != NULL);
6221 * Messages from the event queue must be taken off in
6222 * FIFO order.
6224 ASSERT(sq->sq_evhead == bp);
6225 sq->sq_evhead = bp->b_next;
6227 if (bp->b_next == NULL) {
6228 /* Deleting last */
6229 ASSERT(sq->sq_evtail == bp);
6230 sq->sq_evtail = NULL;
6231 sq->sq_flags &= ~SQ_EVENTS;
6233 bp->b_prev = bp->b_next = NULL;
6234 ASSERT(bp->b_datap->db_ref != 0);
6236 mutex_exit(SQLOCK(sq));
6238 (*func)(qp, bp);
6240 mutex_enter(SQLOCK(sq));
6242 * re-read the flags, since they could have changed.
6244 flags = sq->sq_flags;
6245 ASSERT(flags & SQ_EXCL);
6247 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
6248 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6250 if (flags & SQ_WANTWAKEUP) {
6251 flags &= ~SQ_WANTWAKEUP;
6252 cv_broadcast(&sq->sq_wait);
6254 if (flags & SQ_WANTEXWAKEUP) {
6255 flags &= ~SQ_WANTEXWAKEUP;
6256 cv_broadcast(&sq->sq_exitwait);
6258 sq->sq_flags = flags;
6262 * Put messages on the event list.
6263 * If we can go exclusive now, do so and process the event list, otherwise
6264 * let the last claim service this list (or wake the sqthread).
6265 * This procedure assumes SQLOCK is held. To run the event list, it
6266 * must be called with no claims.
6268 static void
6269 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
6271 uint16_t count;
6273 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6274 ASSERT(func != NULL);
6277 * This is a callback. Add it to the list of callbacks
6278 * and see about upgrading.
6280 mp->b_prev = (mblk_t *)func;
6281 mp->b_queue = q;
6282 mp->b_next = NULL;
6283 if (sq->sq_evhead == NULL) {
6284 sq->sq_evhead = sq->sq_evtail = mp;
6285 sq->sq_flags |= SQ_EVENTS;
6286 } else {
6287 ASSERT(sq->sq_evtail != NULL);
6288 ASSERT(sq->sq_evtail->b_next == NULL);
6289 ASSERT(sq->sq_flags & SQ_EVENTS);
6290 sq->sq_evtail->b_next = mp;
6291 sq->sq_evtail = mp;
6294 * We have set SQ_EVENTS, so threads will have to
6295 * unwind out of the perimeter, and new entries will
6296 * not grab a putlock. But we still need to know
6297 * how many threads have already made a claim to the
6298 * syncq, so grab the putlocks, and sum the counts.
6299 * If there are no claims on the syncq, we can upgrade
6300 * to exclusive, and run the event list.
6301 * NOTE: We hold the SQLOCK, so we can just grab the
6302 * putlocks.
6304 count = sq->sq_count;
6305 SQ_PUTLOCKS_ENTER(sq);
6306 SUM_SQ_PUTCOUNTS(sq, count);
6308 * We have no claim, so we need to check if there
6309 * are no others, then we can upgrade.
6312 * There are currently no claims on
6313 * the syncq by this thread (at least on this entry). The thread that has
6314 * the claim should drain the syncq.
6316 if (count > 0) {
6318 * Can't upgrade - other threads inside.
6320 SQ_PUTLOCKS_EXIT(sq);
6321 mutex_exit(SQLOCK(sq));
6322 return;
6325 * Need to set SQ_EXCL and make a claim on the syncq.
6327 ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6328 sq->sq_flags |= SQ_EXCL;
6329 ASSERT(sq->sq_count == 0);
6330 sq->sq_count++;
6331 SQ_PUTLOCKS_EXIT(sq);
6333 /* Process the events list */
6334 sq_run_events(sq);
6337 * Release our claim...
6339 sq->sq_count--;
6342 * And release SQ_EXCL.
6343 * We don't need to acquire the putlocks to release
6344 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6346 sq->sq_flags &= ~SQ_EXCL;
6349 * sq_run_events should have released SQ_EXCL
6351 ASSERT(!(sq->sq_flags & SQ_EXCL));
6354 * If anything happened while we were running the
6355 * events (or was there before), we need to process
6356 * them now. We shouldn't be exclusive since we
6357 * released the perimeter above (plus, we asserted
6358 * for it).
6360 if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6361 drain_syncq(sq);
6362 else
6363 mutex_exit(SQLOCK(sq));
6367 * Perform delayed processing. The caller has to make sure that it is safe
6368 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6369 * set).
6371 * Assume that the caller has NO claims on the syncq. However, a claim
6372 * on the syncq does not indicate that a thread is draining the syncq.
6373 * There may be more claims on the syncq than there are threads draining
6374 * (i.e. #_threads_draining <= sq_count)
6376 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6377 * in order to preserve qwriter(OUTER) ordering constraints.
6379 * sq_putcount only needs to be checked when dispatching the queued
6380 * writer call for CIPUT sync queue, but this is handled in sq_run_events.
6382 void
6383 drain_syncq(syncq_t *sq)
6385 queue_t *qp;
6386 uint16_t count;
6387 uint16_t type = sq->sq_type;
6388 uint16_t flags = sq->sq_flags;
6389 boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;
6391 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6392 "drain_syncq start:%p", sq);
6393 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6394 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6395 sq->sq_oprev == NULL) ||
6396 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6397 sq->sq_oprev != NULL));
6400 * Drop SQ_SERVICE flag.
6402 if (bg_service)
6403 sq->sq_svcflags &= ~SQ_SERVICE;
6406 * If SQ_EXCL is set, someone else is processing this syncq - let them
6407 * finish the job.
6409 if (flags & SQ_EXCL) {
6410 if (bg_service) {
6411 ASSERT(sq->sq_servcount != 0);
6412 sq->sq_servcount--;
6414 mutex_exit(SQLOCK(sq));
6415 return;
6419 * This routine can be called by a background thread if
6420 * it was scheduled by a hi-priority thread. So, if there are
6421 * no messages queued, return (remember, we have the SQLOCK,
6422 * and it cannot change until we release it). Also wake up any waiters.
6424 if (!(flags & SQ_QUEUED)) {
6425 if (flags & SQ_WANTWAKEUP) {
6426 flags &= ~SQ_WANTWAKEUP;
6427 cv_broadcast(&sq->sq_wait);
6429 if (flags & SQ_WANTEXWAKEUP) {
6430 flags &= ~SQ_WANTEXWAKEUP;
6431 cv_broadcast(&sq->sq_exitwait);
6433 sq->sq_flags = flags;
6434 if (bg_service) {
6435 ASSERT(sq->sq_servcount != 0);
6436 sq->sq_servcount--;
6438 mutex_exit(SQLOCK(sq));
6439 return;
6443 * If this is not a concurrent put perimeter, we need to
6444 * become exclusive to drain. Also, if not CIPUT, we would
6445 * not have acquired a putlock, so we don't need to check
6446 * the putcounts. If not entering with a claim, we test
6447 * for sq_count == 0.
6449 type = sq->sq_type;
6450 if (!(type & SQ_CIPUT)) {
6451 if (sq->sq_count > 1) {
6452 if (bg_service) {
6453 ASSERT(sq->sq_servcount != 0);
6454 sq->sq_servcount--;
6456 mutex_exit(SQLOCK(sq));
6457 return;
6459 sq->sq_flags |= SQ_EXCL;
6463 * This is where we make a claim to the syncq.
6464 * This can either be done by incrementing a putlock, or
6465 * the sq_count. But since we already have the SQLOCK
6466 * here, we just bump the sq_count.
6468 * Note that after we make a claim, we need to let the code
6469 * fall through to the end of this routine to clean itself
6470 * up. A return in the while loop will put the syncq in a
6471 * very bad state.
6473 sq->sq_count++;
6474 ASSERT(sq->sq_count != 0); /* wraparound */
6476 while ((flags = sq->sq_flags) & SQ_QUEUED) {
6478 * If we are told to stayaway or went exclusive,
6479 * we are done.
6481 if (flags & (SQ_STAYAWAY)) {
6482 break;
6486 * If there are events to run, do so.
6487 * We have one claim to the syncq, so if there are
6488 * more than one, other threads are running.
6490 if (sq->sq_evhead != NULL) {
6491 ASSERT(sq->sq_flags & SQ_EVENTS);
6493 count = sq->sq_count;
6494 SQ_PUTLOCKS_ENTER(sq);
6495 SUM_SQ_PUTCOUNTS(sq, count);
6496 if (count > 1) {
6497 SQ_PUTLOCKS_EXIT(sq);
6498 /* Can't upgrade - other threads inside */
6499 break;
6501 ASSERT((flags & SQ_EXCL) == 0);
6502 sq->sq_flags = flags | SQ_EXCL;
6503 SQ_PUTLOCKS_EXIT(sq);
6505 * we have the only claim, run the events,
6506 * sq_run_events will clear the SQ_EXCL flag.
6508 sq_run_events(sq);
6511 * If this is a CIPUT perimeter, we need
6512 * to drop the SQ_EXCL flag so we can properly
6513 * continue draining the syncq.
6515 if (type & SQ_CIPUT) {
6516 ASSERT(sq->sq_flags & SQ_EXCL);
6517 sq->sq_flags &= ~SQ_EXCL;
6521 * And go back to the beginning just in case
6522 * anything changed while we were away.
6524 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6525 continue;
6528 ASSERT(sq->sq_evhead == NULL);
6529 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6532 * Find the queue that is not draining.
6534 * q_draining is protected by QLOCK which we do not hold.
6535 * But if it was set, then a thread was draining, and if it gets
6536 * cleared, then it was because the thread has successfully
6537 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6538 * state to happen, a thread needs the SQLOCK which we hold, and
6539 * if there was such a flag, we would have already seen it.
6542 for (qp = sq->sq_head;
6543 qp != NULL && (qp->q_draining ||
6544 (qp->q_sqflags & Q_SQDRAINING));
6545 qp = qp->q_sqnext)
6548 if (qp == NULL)
6549 break;
6552 * We have a queue to work on, and we hold the
6553 * SQLOCK and one claim, call qdrain_syncq.
6554 * This means we need to release the SQLOCK and
6555 * acquire the QLOCK (OK since we have a claim).
6556 * Note that qdrain_syncq will actually dequeue
6557 * this queue from the sq_head list when it is
6558 * convinced all the work is done and release
6559 * the QLOCK before returning.
6561 qp->q_sqflags |= Q_SQDRAINING;
6562 mutex_exit(SQLOCK(sq));
6563 mutex_enter(QLOCK(qp));
6564 qdrain_syncq(sq, qp);
6565 mutex_enter(SQLOCK(sq));
6567 /* The queue is drained */
6568 ASSERT(qp->q_sqflags & Q_SQDRAINING);
6569 qp->q_sqflags &= ~Q_SQDRAINING;
6571 * NOTE: After this point qp should not be used since it may be
6572 * closed.
6576 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6577 flags = sq->sq_flags;
6580 * sq->sq_head cannot change because we hold the
6581 * sqlock. However, a thread CAN decide that it is no longer
6582 * going to drain that queue. If so, it should be due to
6583 * a GOAWAY state, and we should see that here.
6585 * This loop is not very efficient. One solution may be adding a second
6586 * pointer to the "draining" queue, but it is difficult to do when
6587 * queues are inserted in the middle due to priority ordering. Another
6588 * possibility is to yank the queue out of the sq list and put it onto
6589 * the "draining list" and then put it back if it can't be drained.
6592 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6593 (type & SQ_CI) || sq->sq_head->q_draining);
6595 /* Drop SQ_EXCL for non-CIPUT perimeters */
6596 if (!(type & SQ_CIPUT))
6597 flags &= ~SQ_EXCL;
6598 ASSERT((flags & SQ_EXCL) == 0);
6600 /* Wake up any waiters. */
6601 if (flags & SQ_WANTWAKEUP) {
6602 flags &= ~SQ_WANTWAKEUP;
6603 cv_broadcast(&sq->sq_wait);
6605 if (flags & SQ_WANTEXWAKEUP) {
6606 flags &= ~SQ_WANTEXWAKEUP;
6607 cv_broadcast(&sq->sq_exitwait);
6609 sq->sq_flags = flags;
6611 ASSERT(sq->sq_count != 0);
6612 /* Release our claim. */
6613 sq->sq_count--;
6615 if (bg_service) {
6616 ASSERT(sq->sq_servcount != 0);
6617 sq->sq_servcount--;
6620 mutex_exit(SQLOCK(sq));
6622 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6623 "drain_syncq end:%p", sq);
6629 * qdrain_syncq can be called (currently) from only one of two places:
6630 * drain_syncq
6631 * putnext (or some variation of it).
6632 * and eventually
6633 * qwait(_sig)
6635 * If called from drain_syncq, we found it in the list of queues needing
6636 * service, so there is work to be done (or it wouldn't be in the list).
6638 * If called from some putnext variation, it was because the
6639 * perimeter is open, but messages are blocking a putnext and
6640 * there is not a thread working on it. Now a thread could start
6641 * working on it while we are getting ready to do so ourselves, but
6642 * the thread would set the q_draining flag, and we can spin out.
6644 * As for qwait(_sig), I think I shall let it continue to call
6645 * drain_syncq directly (after all, it will get here eventually).
6647 * qdrain_syncq has to terminate when:
6648 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6649 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6651 * ASSUMES:
6652 * One claim
6653 * QLOCK held
6654 * SQLOCK not held
6655 * Will release QLOCK before returning
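/*
 * Illustrative sketch (user-space pthreads model, not kernel code) of the
 * locking contract above: the caller enters holding QLOCK and one claim,
 * and the drain routine releases QLOCK before returning. Names are
 * hypothetical; a plain pthread mutex stands in for QLOCK.
 */
#include <pthread.h>

static pthread_mutex_t qlock_model = PTHREAD_MUTEX_INITIALIZER;

/* Callee: runs with the lock held and hands it back released. */
static void
qdrain_model(void)
{
	/* ... drain work done under qlock_model ... */
	pthread_mutex_unlock(&qlock_model);	/* contract: released on return */
}

static void
qdrain_caller_model(void)
{
	pthread_mutex_lock(&qlock_model);	/* ASSUMES: QLOCK held on entry */
	qdrain_model();
	/* qlock_model is no longer held here; do not touch queue state. */
}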
6657 void
6658 qdrain_syncq(syncq_t *sq, queue_t *q)
6660 mblk_t *bp;
6661 #ifdef DEBUG
6662 uint16_t count;
6663 #endif
6665 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6666 "drain_syncq start:%p", sq);
6667 ASSERT(q->q_syncq == sq);
6668 ASSERT(MUTEX_HELD(QLOCK(q)));
6669 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6671 * For non-CIPUT perimeters, we should be called with the exclusive bit
6672 * set already. For CIPUT perimeters, we will be doing a concurrent
6673 * drain, so it better not be set.
6675 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6676 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6677 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6679 * All outer pointers are set, or none of them are
6681 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6682 sq->sq_oprev == NULL) ||
6683 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6684 sq->sq_oprev != NULL));
6685 #ifdef DEBUG
6686 count = sq->sq_count;
6688 * This is OK without the putlocks, because we have one
6689 * claim either from the sq_count, or a putcount. We could
6690 * get an erroneous value from other counts, but ours won't
6691 * change, so one way or another, we will have at least a
6692 * value of one.
6694 SUM_SQ_PUTCOUNTS(sq, count);
6695 ASSERT(count >= 1);
6696 #endif /* DEBUG */
6699 * The first thing to do is find out if a thread is already draining
6700 * this queue. If so, we are done, just return.
6702 if (q->q_draining) {
6703 mutex_exit(QLOCK(q));
6704 return;
6708 * If the perimeter is exclusive, there is nothing we can do right now,
6709 * go away. Note that there is nothing to prevent this case from
6710 * changing right after this check, but the spin-out will catch it.
6713 /* Tell other threads that we are draining this queue */
6714 q->q_draining = 1; /* Protected by QLOCK */
6717 * If there is nothing to do, clear QFULL as necessary. This caters for
6718 * the case where an empty queue was enqueued onto the syncq.
6720 if (q->q_sqhead == NULL) {
6721 ASSERT(q->q_syncqmsgs == 0);
6722 mutex_exit(QLOCK(q));
6723 clr_qfull(q);
6724 mutex_enter(QLOCK(q));
6728 * Note that q_sqhead must be re-checked here in case another message
6729 * was enqueued whilst QLOCK was dropped during the call to clr_qfull.
6731 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6733 * Because we can enter this routine just because a putnext is
6734 * blocked, we need to spin out if the perimeter wants to go
6735 * exclusive as well as just blocked. We need to spin out also
6736 * if events are queued on the syncq.
6737 * Don't check for SQ_EXCL, because non-CIPUT perimeters would
6738 * set it, and it can't become exclusive while we hold a claim.
6740 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6741 break;
6744 #ifdef DEBUG
6746 * Since we are in qdrain_syncq, we already know the queue,
6747 * but for sanity, we want to check this against the qp that
6748 * was passed in by bp->b_queue.
6751 ASSERT(bp->b_queue == q);
6752 ASSERT(bp->b_queue->q_syncq == sq);
6753 bp->b_queue = NULL;
6756 * We would have the following check in the DEBUG code:
6758 * if (bp->b_prev != NULL) {
6759 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6762 * This can't be done, however, since IP modifies qinfo
6763 * structure at run-time (switching between IPv4 qinfo and IPv6
6764 * qinfo), invalidating the check.
6765 * So the assignment to func is left here, but the ASSERT itself
6766 * is removed until the whole issue is resolved.
6768 #endif
6769 ASSERT(q->q_sqhead == bp);
6770 q->q_sqhead = bp->b_next;
6771 bp->b_prev = bp->b_next = NULL;
6772 ASSERT(q->q_syncqmsgs > 0);
6773 mutex_exit(QLOCK(q));
6775 ASSERT(bp->b_datap->db_ref != 0);
6777 (void) (*q->q_qinfo->qi_putp)(q, bp);
6779 mutex_enter(QLOCK(q));
6782 * q_syncqmsgs should only be decremented after executing the
6783 * put procedure to avoid message re-ordering. This is due to an
6784 * optimisation in putnext() which can call the put procedure
6785 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED
6786 * being set).
6788 * We also need to clear QFULL in the next service procedure
6789 * queue if this is the last message destined for that queue.
6791 * It would make better sense to have some sort of tunable for
6792 * the low water mark, but these semantics are not yet defined.
6793 * So, alas, we use a constant.
6795 if (--q->q_syncqmsgs == 0) {
6796 mutex_exit(QLOCK(q));
6797 clr_qfull(q);
6798 mutex_enter(QLOCK(q));
6802 * Always clear SQ_EXCL when CIPUT in order to handle
6803 * qwriter(INNER). The putp() can call qwriter and get exclusive
6804 * access IFF this is the only claim. So, we need to test for
6805 * this possibility, acquire the mutex and clear the bit.
6807 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6808 mutex_enter(SQLOCK(sq));
6809 sq->sq_flags &= ~SQ_EXCL;
6810 mutex_exit(SQLOCK(sq));
6815 * We should either have no messages on this queue, or we were told to
6816 * goaway by a waiter (which we will wake up at the end of this
6817 * function).
6819 ASSERT((q->q_sqhead == NULL) ||
6820 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6822 ASSERT(MUTEX_HELD(QLOCK(q)));
6823 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6825 /* Remove the q from the syncq list if all the messages are drained. */
6826 if (q->q_sqhead == NULL) {
6827 ASSERT(q->q_syncqmsgs == 0);
6828 mutex_enter(SQLOCK(sq));
6829 if (q->q_sqflags & Q_SQQUEUED)
6830 SQRM_Q(sq, q);
6831 mutex_exit(SQLOCK(sq));
6833 * Since the queue is removed from the list, reset its priority.
6835 q->q_spri = 0;
6839 * Remember, the q_draining flag is used to let another thread know
6840 * that there is a thread currently draining the messages for a queue.
6841 * Since we are now done with this queue (even if there may be messages
6842 * still there), we need to clear this flag so some thread will work on
6843 * it if needed.
6845 ASSERT(q->q_draining);
6846 q->q_draining = 0;
6848 /* Called with a claim, so OK to drop all locks. */
6849 mutex_exit(QLOCK(q));
6851 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6852 "drain_syncq end:%p", sq);
6854 /* END OF QDRAIN_SYNCQ */
6858 * This is the mate to qdrain_syncq, except that it is putting the message onto
6859 * the queue instead of draining. Since the message is destined for the queue
6860 * that is selected, there is no need to identify the function because the
6861 * message is intended for the put routine for the queue. For debug kernels,
6862 * this routine will do it anyway just in case.
6864 * After the message is enqueued on the syncq, it calls putnext_tail()
6865 * which will schedule a background thread to actually process the message.
6867 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6868 * SQLOCK(sq) and QLOCK(q) are not held.
6870 void
6871 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6873 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6874 ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6875 ASSERT(sq->sq_count > 0);
6876 ASSERT(q->q_syncq == sq);
6877 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6878 sq->sq_oprev == NULL) ||
6879 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6880 sq->sq_oprev != NULL));
6882 mutex_enter(QLOCK(q));
6884 #ifdef DEBUG
6886 * This is used for debug in the qfill_syncq/qdrain_syncq case
6887 * to trace the queue that the message is intended for. Note
6888 * that the original use was to identify the queue and function
6889 * to call on the drain. In the new syncq, we have the context
6890 * of the queue that we are draining, so call its putproc and
6891 * don't rely on the saved values. But for debug this is still
6892 * useful information.
6894 mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6895 mp->b_queue = q;
6896 mp->b_next = NULL;
6897 #endif
6898 ASSERT(q->q_syncq == sq);
6900 * Enqueue the message on the list.
6901 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6902 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP().
6904 SQPUT_MP(q, mp);
6905 mutex_enter(SQLOCK(sq));
6908 * And queue on syncq for scheduling, if not already queued.
6909 * Note that we need the SQLOCK for this, and for testing flags
6910 * at the end to see if we will drain. So grab it now, and
6911 * release it before we call qdrain_syncq or return.
6913 if (!(q->q_sqflags & Q_SQQUEUED)) {
6914 q->q_spri = curthread->t_pri;
6915 SQPUT_Q(sq, q);
6917 #ifdef DEBUG
6918 else {
6920 * All of these conditions MUST be true!
6922 ASSERT(sq->sq_tail != NULL);
6923 if (sq->sq_tail == sq->sq_head) {
6924 ASSERT((q->q_sqprev == NULL) &&
6925 (q->q_sqnext == NULL));
6926 } else {
6927 ASSERT((q->q_sqprev != NULL) ||
6928 (q->q_sqnext != NULL));
6930 ASSERT(sq->sq_flags & SQ_QUEUED);
6931 ASSERT(q->q_syncqmsgs != 0);
6932 ASSERT(q->q_sqflags & Q_SQQUEUED);
6934 #endif
6935 mutex_exit(QLOCK(q));
6937 * SQLOCK is still held, so sq_count can be safely decremented.
6939 sq->sq_count--;
6941 putnext_tail(sq, q, 0);
6942 /* Should not reference sq or q after this point. */
6945 /* End of qfill_syncq */
6948 * Remove all messages from a syncq (if qp is NULL) or remove all messages
6949 * that would be put into qp by drain_syncq.
6950 * Used when deleting the syncq (qp == NULL) or when detaching
6951 * a queue (qp != NULL).
6952 * Return non-zero if one or more messages were freed.
6954 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
6955 * sq_putlocks are used.
6957 * NOTE: This function assumes that it is called from the close() context and
6958 * that all the queues in the syncq are going away. For this reason it doesn't
6959 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6960 * currently valid, but it is useful to rethink this function to behave properly
6961 * in other cases.
6963 static int
6964 flush_syncq(syncq_t *sq, queue_t *qp)
6966 mblk_t *bp, *mp_head, *mp_next, *mp_prev;
6967 queue_t *q;
6968 int ret = 0;
6970 mutex_enter(SQLOCK(sq));
6973 * Before we leave, we need to make sure there are no
6974 * events listed for this queue. All events for this queue
6975 * will just be freed.
6977 if (qp != NULL && sq->sq_evhead != NULL) {
6978 ASSERT(sq->sq_flags & SQ_EVENTS);
6980 mp_prev = NULL;
6981 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6982 mp_next = bp->b_next;
6983 if (bp->b_queue == qp) {
6984 /* Delete this message */
6985 if (mp_prev != NULL) {
6986 mp_prev->b_next = mp_next;
6988 * Update sq_evtail if the last element
6989 * is removed.
6991 if (bp == sq->sq_evtail) {
6992 ASSERT(mp_next == NULL);
6993 sq->sq_evtail = mp_prev;
6995 } else
6996 sq->sq_evhead = mp_next;
6997 if (sq->sq_evhead == NULL)
6998 sq->sq_flags &= ~SQ_EVENTS;
6999 bp->b_prev = bp->b_next = NULL;
7000 freemsg(bp);
7001 ret++;
7002 } else {
7003 mp_prev = bp;
7009 * Walk sq_head and:
7010 * - match qp if qp is set, remove its messages
7011 * - all if qp is not set
7013 q = sq->sq_head;
7014 while (q != NULL) {
7015 ASSERT(q->q_syncq == sq);
7016 if ((qp == NULL) || (qp == q)) {
7018 * Yank the messages as a list off the queue
7020 mp_head = q->q_sqhead;
7022 * We do not have QLOCK(q) here (which is safe due to
7023 * assumptions mentioned above). To obtain the lock we
7024 * need to release SQLOCK which may allow lots of things
7025 * to change upon us. This place requires more analysis.
7027 q->q_sqhead = q->q_sqtail = NULL;
7028 ASSERT(mp_head->b_queue &&
7029 mp_head->b_queue->q_syncq == sq);
7032 * Free each of the messages.
7034 for (bp = mp_head; bp != NULL; bp = mp_next) {
7035 mp_next = bp->b_next;
7036 bp->b_prev = bp->b_next = NULL;
7037 freemsg(bp);
7038 ret++;
7041 * Now remove the queue from the syncq.
7043 ASSERT(q->q_sqflags & Q_SQQUEUED);
7044 SQRM_Q(sq, q);
7045 q->q_spri = 0;
7046 q->q_syncqmsgs = 0;
7049 * If qp was specified, we are done with it and are
7050 * going to drop SQLOCK(sq) and return. We wakeup syncq
7051 * waiters while we still have the SQLOCK.
7053 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
7054 sq->sq_flags &= ~SQ_WANTWAKEUP;
7055 cv_broadcast(&sq->sq_wait);
7057 /* Drop SQLOCK across clr_qfull */
7058 mutex_exit(SQLOCK(sq));
7061 * We avoid doing the test that drain_syncq does and
7062 * unconditionally clear qfull for every flushed
7063 * message. Since flush_syncq is only called during
7064 * close this should not be a problem.
7066 clr_qfull(q);
7067 if (qp != NULL) {
7068 return (ret);
7069 } else {
7070 mutex_enter(SQLOCK(sq));
7072 * The head was removed by SQRM_Q above.
7073 * Reread the new head and flush it.
7075 q = sq->sq_head;
7077 } else {
7078 q = q->q_sqnext;
7080 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7083 if (sq->sq_flags & SQ_WANTWAKEUP) {
7084 sq->sq_flags &= ~SQ_WANTWAKEUP;
7085 cv_broadcast(&sq->sq_wait);
7088 mutex_exit(SQLOCK(sq));
7089 return (ret);
7093 * Propagate all messages from a syncq to the next syncq that are associated
7094 * with the specified queue. If the queue is attached to a driver or if the
7095 * messages have been added due to a qwriter(PERIM_INNER), free the messages.
7097 * Assumes that the stream is strlock()'ed. We don't come here if there
7098 * are no messages to propagate.
7100 * NOTE : If the queue is attached to a driver, all the messages are freed
7101 * as there is no point in propagating the messages from the driver syncq
7102 * to the closing stream head which will in turn get freed later.
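/*
 * Illustrative sketch (user-space model, not kernel code) of the list
 * splice performed below: a whole b_next-chained message list is detached
 * from one queue's q_sqhead/q_sqtail and appended to the tail of the next
 * queue's list. Mock types; *_model names are hypothetical, and only the
 * b_next linkage is modelled.
 */
#include <stddef.h>

typedef struct splice_mblk {
	struct splice_mblk *b_next;
} splice_mblk_t;

typedef struct splice_queue {
	splice_mblk_t *q_sqhead;
	splice_mblk_t *q_sqtail;
	int q_syncqmsgs;
} splice_queue_t;

static void
propagate_model(splice_queue_t *qp, splice_queue_t *nqp)
{
	splice_mblk_t *head = qp->q_sqhead;
	splice_mblk_t *tail = qp->q_sqtail;
	int moved = qp->q_syncqmsgs;

	qp->q_sqhead = qp->q_sqtail = NULL;	/* detach from old queue */
	qp->q_syncqmsgs = 0;
	if (head == NULL)
		return;				/* nothing to move */
	if (nqp->q_sqhead == NULL)
		nqp->q_sqhead = head;		/* becomes the whole list */
	else
		nqp->q_sqtail->b_next = head;	/* append after existing tail */
	nqp->q_sqtail = tail;
	nqp->q_syncqmsgs += moved;
}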
7104 static int
7105 propagate_syncq(queue_t *qp)
7107 mblk_t *bp, *head, *tail, *prev, *next;
7108 syncq_t *sq;
7109 queue_t *nqp;
7110 syncq_t *nsq;
7111 boolean_t isdriver;
7112 int moved = 0;
7113 uint16_t flags;
7114 pri_t priority = curthread->t_pri;
7115 #ifdef DEBUG
7116 void (*func)();
7117 #endif
7119 sq = qp->q_syncq;
7120 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7121 /* debug macro */
7122 SQ_PUTLOCKS_HELD(sq);
7124 * As entersq() does not increment the sq_count for
7125 * the write side, check sq_count for non-QPERQ
7126 * perimeters alone.
7128 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
7131 * propagate_syncq() can be called either because of messages on the
7132 * queue syncq or because of events on the queue syncq. Do actual
7133 * message propagations if there are any messages.
7135 if (qp->q_syncqmsgs) {
7136 isdriver = (qp->q_flag & QISDRV);
7138 if (!isdriver) {
7139 nqp = qp->q_next;
7140 nsq = nqp->q_syncq;
7141 ASSERT(MUTEX_HELD(SQLOCK(nsq)));
7142 /* debug macro */
7143 SQ_PUTLOCKS_HELD(nsq);
7144 #ifdef DEBUG
7145 func = (void (*)())nqp->q_qinfo->qi_putp;
7146 #endif
7149 SQRM_Q(sq, qp);
7150 priority = MAX(qp->q_spri, priority);
7151 qp->q_spri = 0;
7152 head = qp->q_sqhead;
7153 tail = qp->q_sqtail;
7154 qp->q_sqhead = qp->q_sqtail = NULL;
7155 qp->q_syncqmsgs = 0;
7158 * Walk the list of messages, and free them if this is a driver,
7159 * otherwise reset the b_prev and b_queue value to the new putp.
7160 * Afterward, we will just add the head to the end of the next
7161 * syncq, and point the tail to the end of this one.
7164 for (bp = head; bp != NULL; bp = next) {
7165 next = bp->b_next;
7166 if (isdriver) {
7167 bp->b_prev = bp->b_next = NULL;
7168 freemsg(bp);
7169 continue;
7171 /* Change the q values for this message */
7172 bp->b_queue = nqp;
7173 #ifdef DEBUG
7174 bp->b_prev = (mblk_t *)func;
7175 #endif
7176 moved++;
7179 * Attach list of messages to the end of the new queue (if there
7180 * is a list of messages).
7183 if (!isdriver && head != NULL) {
7184 ASSERT(tail != NULL);
7185 if (nqp->q_sqhead == NULL) {
7186 nqp->q_sqhead = head;
7187 } else {
7188 ASSERT(nqp->q_sqtail != NULL);
7189 nqp->q_sqtail->b_next = head;
7191 nqp->q_sqtail = tail;
7193 * When messages are moved from a high priority queue to
7194 * another queue, the destination queue priority is
7195 * upgraded.
7198 if (priority > nqp->q_spri)
7199 nqp->q_spri = priority;
7201 SQPUT_Q(nsq, nqp);
7203 nqp->q_syncqmsgs += moved;
7204 ASSERT(nqp->q_syncqmsgs != 0);
7209 * Before we leave, we need to make sure there are no
7210 * events listed for this queue. All events for this queue
7211 * will just be freed.
7213 if (sq->sq_evhead != NULL) {
7214 ASSERT(sq->sq_flags & SQ_EVENTS);
7215 prev = NULL;
7216 for (bp = sq->sq_evhead; bp != NULL; bp = next) {
7217 next = bp->b_next;
7218 if (bp->b_queue == qp) {
7219 /* Delete this message */
7220 if (prev != NULL) {
7221 prev->b_next = next;
7223 * Update sq_evtail if the last element
7224 * is removed.
7226 if (bp == sq->sq_evtail) {
7227 ASSERT(next == NULL);
7228 sq->sq_evtail = prev;
7230 } else
7231 sq->sq_evhead = next;
7232 if (sq->sq_evhead == NULL)
7233 sq->sq_flags &= ~SQ_EVENTS;
7234 bp->b_prev = bp->b_next = NULL;
7235 freemsg(bp);
7236 } else {
7237 prev = bp;
7242 flags = sq->sq_flags;
7244 /* Wake up any waiter before leaving. */
7245 if (flags & SQ_WANTWAKEUP) {
7246 flags &= ~SQ_WANTWAKEUP;
7247 cv_broadcast(&sq->sq_wait);
7249 sq->sq_flags = flags;
7251 return (moved);
7255 * Try to upgrade to exclusive access at the inner perimeter. If this
7256 * cannot be done without blocking then the request will be queued on the syncq
7257 * and drain_syncq will run it later.
7259 * This routine can only be called from put or service procedures plus
7260 * asynchronous callback routines that have properly entered the queue (with
7261 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq
7262 * associated with q.
7264 void
7265 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)())
7267 syncq_t *sq = q->q_syncq;
7268 uint16_t count;
7270 mutex_enter(SQLOCK(sq));
7271 count = sq->sq_count;
7272 SQ_PUTLOCKS_ENTER(sq);
7273 SUM_SQ_PUTCOUNTS(sq, count);
7274 ASSERT(count >= 1);
7275 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC));
7277 if (count == 1) {
7279 * Can upgrade. This case also handles nested qwriter calls
7280 * (when the qwriter callback function calls qwriter). In that
7281 * case SQ_EXCL is already set.
7283 sq->sq_flags |= SQ_EXCL;
7284 SQ_PUTLOCKS_EXIT(sq);
7285 mutex_exit(SQLOCK(sq));
7286 (*func)(q, mp);
7288 * Assumes that leavesq, putnext, and drain_syncq will reset
7289 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on
7290 * until putnext, leavesq, or drain_syncq drops it.
7291 * That way we handle nested qwriter(INNER) without dropping
7292 * SQ_EXCL until the outermost qwriter callback routine is
7293 * done.
7295 return;
7297 SQ_PUTLOCKS_EXIT(sq);
7298 sqfill_events(sq, q, mp, func);
7302 * Synchronous callback support functions
7306 * Allocate a callback parameter structure.
7307 * Assumes that caller initializes the flags and the id.
7308 * Acquires SQLOCK(sq) if non-NULL is returned.
7310 callbparams_t *
7311 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags)
7313 callbparams_t *cbp;
7314 size_t size = sizeof (callbparams_t);
7316 cbp = kmem_alloc(size, kmflags & ~KM_PANIC);
7319 * Only try tryhard allocation if the caller is ready to panic.
7320 * Otherwise just fail.
7322 if (cbp == NULL) {
7323 if (kmflags & KM_PANIC)
7324 cbp = kmem_alloc_tryhard(sizeof (callbparams_t),
7325 &size, kmflags);
7326 else
7327 return (NULL);
7330 ASSERT(size >= sizeof (callbparams_t));
7331 cbp->cbp_size = size;
7332 cbp->cbp_sq = sq;
7333 cbp->cbp_func = func;
7334 cbp->cbp_arg = arg;
7335 mutex_enter(SQLOCK(sq));
7336 cbp->cbp_next = sq->sq_callbpend;
7337 sq->sq_callbpend = cbp;
7338 return (cbp);
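/*
 * Illustrative sketch (user-space model) of the allocation policy above:
 * try a normal allocation first and fall back to the "try hard" path only
 * when the caller has said it would rather not fail at all (KM_PANIC).
 * malloc() is only a stand-in for kmem_alloc()/kmem_alloc_tryhard(), and
 * the retry loop is a crude analogue; names are hypothetical.
 */
#include <stdlib.h>

#define	MODEL_KM_PANIC	0x1

static void *
model_alloc(size_t size, int kmflags)
{
	void *p = malloc(size);		/* normal attempt */

	if (p == NULL && (kmflags & MODEL_KM_PANIC)) {
		/*
		 * Stand-in for kmem_alloc_tryhard(): keep retrying rather
		 * than returning NULL, since the caller cannot tolerate
		 * failure.
		 */
		while ((p = malloc(size)) == NULL)
			;
	}
	return (p);	/* NULL only when MODEL_KM_PANIC was not set */
}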
7341 void
7342 callbparams_free(syncq_t *sq, callbparams_t *cbp)
7344 callbparams_t **pp, *p;
7346 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7348 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7349 if (p == cbp) {
7350 *pp = p->cbp_next;
7351 kmem_free(p, p->cbp_size);
7352 return;
7355 (void) (STRLOG(0, 0, 0, SL_CONSOLE,
7356 "callbparams_free: not found\n"));
7359 void
7360 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag)
7362 callbparams_t **pp, *p;
7364 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7366 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7367 if (p->cbp_id == id && p->cbp_flags == flag) {
7368 *pp = p->cbp_next;
7369 kmem_free(p, p->cbp_size);
7370 return;
7373 (void) (STRLOG(0, 0, 0, SL_CONSOLE,
7374 "callbparams_free_id: not found\n"));
7378 * Callback wrapper function used by once-only callbacks that can be
7379 * cancelled (qtimeout and qbufcall)
7380 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be
7381 * cancelled by the qun* functions.
7383 void
7384 qcallbwrapper(void *arg)
7386 callbparams_t *cbp = arg;
7387 syncq_t *sq;
7388 uint16_t count = 0;
7389 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7390 uint16_t type;
7392 sq = cbp->cbp_sq;
7393 mutex_enter(SQLOCK(sq));
7394 type = sq->sq_type;
7395 if (!(type & SQ_CICB)) {
7396 count = sq->sq_count;
7397 SQ_PUTLOCKS_ENTER(sq);
7398 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7399 SUM_SQ_PUTCOUNTS(sq, count);
7400 sq->sq_needexcl++;
7401 ASSERT(sq->sq_needexcl != 0); /* wraparound */
7402 waitflags |= SQ_MESSAGES;
7404 /* Can not handle exclusive entry at outer perimeter */
7405 ASSERT(type & SQ_COCB);
7407 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) {
7408 if ((sq->sq_callbflags & cbp->cbp_flags) &&
7409 (sq->sq_cancelid == cbp->cbp_id)) {
7410 /* timeout has been cancelled */
7411 sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7412 callbparams_free(sq, cbp);
7413 if (!(type & SQ_CICB)) {
7414 ASSERT(sq->sq_needexcl > 0);
7415 sq->sq_needexcl--;
7416 if (sq->sq_needexcl == 0) {
7417 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7419 SQ_PUTLOCKS_EXIT(sq);
7421 mutex_exit(SQLOCK(sq));
7422 return;
7424 sq->sq_flags |= SQ_WANTWAKEUP;
7425 if (!(type & SQ_CICB)) {
7426 SQ_PUTLOCKS_EXIT(sq);
7428 cv_wait(&sq->sq_wait, SQLOCK(sq));
7429 if (!(type & SQ_CICB)) {
7430 count = sq->sq_count;
7431 SQ_PUTLOCKS_ENTER(sq);
7432 SUM_SQ_PUTCOUNTS(sq, count);
7436 sq->sq_count++;
7437 ASSERT(sq->sq_count != 0); /* Wraparound */
7438 if (!(type & SQ_CICB)) {
7439 ASSERT(count == 0);
7440 sq->sq_flags |= SQ_EXCL;
7441 ASSERT(sq->sq_needexcl > 0);
7442 sq->sq_needexcl--;
7443 if (sq->sq_needexcl == 0) {
7444 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7446 SQ_PUTLOCKS_EXIT(sq);
7449 mutex_exit(SQLOCK(sq));
7451 cbp->cbp_func(cbp->cbp_arg);
7454 * We drop the lock only for leavesq to re-acquire it.
7455 * A possible optimization is to inline leavesq.
7457 mutex_enter(SQLOCK(sq));
7458 callbparams_free(sq, cbp);
7459 mutex_exit(SQLOCK(sq));
7460 leavesq(sq, SQ_CALLBACK);
7464 * No need to grab sq_putlocks here. See comment in strsubr.h that
7465 * explains when sq_putlocks are used.
7467 * sq_count (or one of the sq_putcounts) has already been
7468 * decremented by the caller, and if SQ_QUEUED, we need to call
7469 * drain_syncq (the global syncq drain).
7470 * If putnext_tail is called with the SQ_EXCL bit set, we are in
7471 * one of two states: a non-CIPUT perimeter where we need to clear
7472 * it, or we went exclusive in the put procedure. In any case,
7473 * we want to clear the bit now, and it is probably easier to do
7474 * this at the beginning of this function (remember, we hold
7475 * the SQLOCK). Lastly, if there are other messages queued
7476 * on the syncq (and not for our destination), enable the syncq
7477 * for background work.
7480 /* ARGSUSED */
7481 void
7482 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags)
7484 uint16_t flags = sq->sq_flags;
7486 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7487 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
7489 /* Clear SQ_EXCL if set in passflags */
7490 if (passflags & SQ_EXCL) {
7491 flags &= ~SQ_EXCL;
7493 if (flags & SQ_WANTWAKEUP) {
7494 flags &= ~SQ_WANTWAKEUP;
7495 cv_broadcast(&sq->sq_wait);
7497 if (flags & SQ_WANTEXWAKEUP) {
7498 flags &= ~SQ_WANTEXWAKEUP;
7499 cv_broadcast(&sq->sq_exitwait);
7501 sq->sq_flags = flags;
7504 * We have cleared SQ_EXCL if we were asked to, and started
7505 * the wakeup process for waiters. If there are no writers
7506 * then we need to drain the syncq if we were told to, or
7507 * enable the background thread to do it.
7509 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) {
7510 if ((passflags & SQ_QUEUED) ||
7511 (sq->sq_svcflags & SQ_DISABLED)) {
7512 /* drain_syncq will take care of events in the list */
7513 drain_syncq(sq);
7514 return;
7515 } else if (flags & SQ_QUEUED) {
7516 sqenable(sq);
7519 /* Drop the SQLOCK on exit */
7520 mutex_exit(SQLOCK(sq));
7521 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
7522 "putnext_end:(%p, %p, %p) done", NULL, qp, sq);
7525 void
7526 set_qend(queue_t *q)
7528 mutex_enter(QLOCK(q));
7529 if (!O_SAMESTR(q))
7530 q->q_flag |= QEND;
7531 else
7532 q->q_flag &= ~QEND;
7533 mutex_exit(QLOCK(q));
7534 q = _OTHERQ(q);
7535 mutex_enter(QLOCK(q));
7536 if (!O_SAMESTR(q))
7537 q->q_flag |= QEND;
7538 else
7539 q->q_flag &= ~QEND;
7540 mutex_exit(QLOCK(q));
7544 * Set QFULL in next service procedure queue (that cares) if not already
7545 * set and if there are already more messages on the syncq than
7546 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on
7547 * any syncq.
7549 * The fq here is the next queue with a service procedure. This is where
7550 * we would fail canputnext, so this is where we need to set QFULL.
7551 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag.
7553 * We already have QLOCK at this point. To avoid cross-locks with
7554 * freezestr() which grabs all QLOCKs and with strlock() which grabs both
7555 * SQLOCK and sd_reflock, we need to drop respective locks first.
7557 void
7558 set_qfull(queue_t *q)
7560 queue_t *fq = NULL;
7562 ASSERT(MUTEX_HELD(QLOCK(q)));
7563 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
7564 (q->q_syncqmsgs > sq_max_size)) {
7565 if ((fq = q->q_nfsrv) == q) {
7566 fq->q_flag |= QFULL;
7567 } else {
7568 mutex_exit(QLOCK(q));
7569 mutex_enter(QLOCK(fq));
7570 fq->q_flag |= QFULL;
7571 mutex_exit(QLOCK(fq));
7572 mutex_enter(QLOCK(q));
7577 void
7578 clr_qfull(queue_t *q)
7580 queue_t *oq = q;
7582 q = q->q_nfsrv;
7583 /* Fast check if there is any work to do before getting the lock. */
7584 if ((q->q_flag & (QFULL|QWANTW)) == 0) {
7585 return;
7589 * Do not reset QFULL (and backenable) if the q_count is the reason
7590 * for QFULL being set.
7592 mutex_enter(QLOCK(q));
7594 * If queue is empty i.e q_mblkcnt is zero, queue can not be full.
7595 * Hence clear the QFULL.
7596 * If both q_count and q_mblkcnt are less than the hiwat mark,
7597 * clear the QFULL.
7599 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
7600 (q->q_mblkcnt < q->q_hiwat))) {
7601 q->q_flag &= ~QFULL;
7603 * A little more confusing, how about this way:
7604 * if someone wants to write,
7605 * AND
7606 * both counts are less than the lowat mark
7607 * OR
7608 * the lowat mark is zero
7609 * THEN
7610 * backenable
7612 if ((q->q_flag & QWANTW) &&
7613 (((q->q_count < q->q_lowat) &&
7614 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7615 q->q_flag &= ~QWANTW;
7616 mutex_exit(QLOCK(q));
7617 backenable(oq, 0);
7618 } else
7619 mutex_exit(QLOCK(q));
7620 } else
7621 mutex_exit(QLOCK(q));
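/*
 * Illustrative sketch: the two decisions made above, written as pure
 * predicates over the counts. A queue stops being full when it is empty
 * (q_mblkcnt == 0) or when both counts drop below the high-water mark;
 * a blocked writer is backenabled when both counts are below the
 * low-water mark (or the low-water mark is zero). Hypothetical names.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
model_can_clr_qfull(size_t q_count, size_t q_mblkcnt, size_t q_hiwat)
{
	return (q_mblkcnt == 0 ||
	    (q_count < q_hiwat && q_mblkcnt < q_hiwat));
}

static bool
model_should_backenable(bool qwantw, size_t q_count, size_t q_mblkcnt,
    size_t q_lowat)
{
	return (qwantw &&
	    ((q_count < q_lowat && q_mblkcnt < q_lowat) || q_lowat == 0));
}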
7625 * Set the forward service procedure pointer.
7627 * Called at insert-time to cache a queue's next forward service procedure in
7628 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted
7629 * has a service procedure then q_nfsrv points to itself. If the queue to be
7630 * inserted does not have a service procedure, then q_nfsrv points to the next
7631 * queue forward that has a service procedure. If the queue is at the logical
7632 * end of the stream (driver for write side, stream head for the read side)
7633 * and does not have a service procedure, then q_nfsrv also points to itself.
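/*
 * Illustrative sketch (user-space model, not kernel code) of the caching
 * rule above: each queue's q_nfsrv points to itself if it has a service
 * procedure, and otherwise to the next queue forward that does (or to
 * itself at the logical end of the stream). With the cache in place, a
 * canput()-style check is a single pointer dereference. This recomputes
 * a whole chain rather than updating incrementally at insert time; mock
 * types, hypothetical names.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct nfsrv_q {
	struct nfsrv_q *q_next;		/* next queue downstream */
	struct nfsrv_q *q_nfsrv;	/* cached forward service queue */
	bool has_srvp;			/* queue has a service procedure */
} nfsrv_q_t;

/* Recompute q_nfsrv for a whole chain, last queue first. */
static void
recompute_nfsrv_model(nfsrv_q_t *q)
{
	if (q == NULL)
		return;
	recompute_nfsrv_model(q->q_next);
	if (q->has_srvp || q->q_next == NULL)
		q->q_nfsrv = q;			/* self: srvp, or end of stream */
	else
		q->q_nfsrv = q->q_next->q_nfsrv;
}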
7635 void
7636 set_nfsrv_ptr(
7637 queue_t *rnew, /* read queue pointer to new module */
7638 queue_t *wnew, /* write queue pointer to new module */
7639 queue_t *prev_rq, /* read queue pointer to the module above */
7640 queue_t *prev_wq) /* write queue pointer to the module above */
7642 queue_t *qp;
7644 if (prev_wq->q_next == NULL) {
7646 * Insert the driver, initialize the driver and stream head.
7647 * In this case, prev_rq/prev_wq should be the stream head.
7648 * _I_INSERT does not allow inserting a driver. Make sure
7649 * that it is not an insertion.
7651 ASSERT(!(rnew->q_flag & _QINSERTING));
7652 wnew->q_nfsrv = wnew;
7653 if (rnew->q_qinfo->qi_srvp)
7654 rnew->q_nfsrv = rnew;
7655 else
7656 rnew->q_nfsrv = prev_rq;
7657 prev_rq->q_nfsrv = prev_rq;
7658 prev_wq->q_nfsrv = prev_wq;
7659 } else {
7661 * set up read side q_nfsrv pointer. This MUST be done
7662 * before setting the write side, because the setting of
7663 * the write side for a fifo may depend on it.
7665 * Suppose we have a fifo that only has pipemod pushed.
7666 * pipemod has no read or write service procedures, so
7667 * nfsrv for both pipemod queues points to prev_rq (the
7668 * stream read head). Now push bufmod (which has only a
7669 * read service procedure). Doing the write side first,
7670 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7671 * is WRONG; the next queue forward from wnew with a
7672 * service procedure will be rnew, not the stream read head.
7673 * Since the downstream queue (which in the case of a fifo
7674 * is the read queue rnew) can affect upstream queues, it
7675 * needs to be done first. Setting up the read side first
7676 * sets nfsrv for both pipemod queues to rnew and then
7677 * when the write side is set up, wnew->q_nfsrv will also
7678 * point to rnew.
7680 if (rnew->q_qinfo->qi_srvp) {
7682 * use _OTHERQ() because, if this is a pipe, next
7683 * module may have been pushed from other end and
7684 * q_next could be a read queue.
7686 qp = _OTHERQ(prev_wq->q_next);
7687 while (qp && qp->q_nfsrv != qp) {
7688 qp->q_nfsrv = rnew;
7689 qp = backq(qp);
7691 rnew->q_nfsrv = rnew;
7692 } else
7693 rnew->q_nfsrv = prev_rq->q_nfsrv;
7695 /* set up write side q_nfsrv pointer */
7696 if (wnew->q_qinfo->qi_srvp) {
7697 wnew->q_nfsrv = wnew;
7700 * For insertion, need to update nfsrv of the modules
7701 * above which do not have a service routine.
7703 if (rnew->q_flag & _QINSERTING) {
7704 for (qp = prev_wq;
7705 qp != NULL && qp->q_nfsrv != qp;
7706 qp = backq(qp)) {
7707 qp->q_nfsrv = wnew->q_nfsrv;
7710 } else {
7711 if (prev_wq->q_next == prev_rq)
7713 * Since prev_wq/prev_rq are the middle of a
7714 * fifo, wnew/rnew will also be the middle of
7715 * a fifo and wnew's nfsrv is same as rnew's.
7717 wnew->q_nfsrv = rnew->q_nfsrv;
7718 else
7719 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7725 * Reset the forward service procedure pointer; called at remove-time.
7727 void
7728 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7730 queue_t *tmp_qp;
7732 /* Reset the write side q_nfsrv pointer for _I_REMOVE */
7733 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7734 for (tmp_qp = backq(wqp);
7735 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7736 tmp_qp = backq(tmp_qp)) {
7737 tmp_qp->q_nfsrv = wqp->q_nfsrv;
7741 /* reset the read side q_nfsrv pointer */
7742 if (rqp->q_qinfo->qi_srvp) {
7743 if (wqp->q_next) { /* non-driver case */
7744 tmp_qp = _OTHERQ(wqp->q_next);
7745 while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7746 /* Note that rqp->q_next cannot be NULL */
7747 ASSERT(rqp->q_next != NULL);
7748 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7749 tmp_qp = backq(tmp_qp);
7756 * This routine should be called after all stream geometry changes to update
7757 * the stream head cached struio() rd/wr queue pointers. Note must be called
7758 * with the streamlock()ed.
7760 * Note: only enables Synchronous STREAMS for a side of a Stream which has
7761 * an explicit synchronous barrier module queue. That is, a queue that
7762 * has specified a struio() type.
7764 static void
7765 strsetuio(stdata_t *stp)
7767 queue_t *wrq;
7769 if (stp->sd_flag & STPLEX) {
7771 * Not streamhead, but a mux, so no Synchronous STREAMS.
7773 stp->sd_struiowrq = NULL;
7774 stp->sd_struiordq = NULL;
7775 return;
7778 * Scan the write queue(s) while synchronous
7779 * until we find a qinfo uio type specified.
7781 wrq = stp->sd_wrq->q_next;
7782 while (wrq) {
7783 if (wrq->q_struiot == STRUIOT_NONE) {
7784 wrq = 0;
7785 break;
7787 if (wrq->q_struiot != STRUIOT_DONTCARE)
7788 break;
7789 if (! _SAMESTR(wrq)) {
7790 wrq = 0;
7791 break;
7793 wrq = wrq->q_next;
7795 stp->sd_struiowrq = wrq;
7797 * Scan the read queue(s) while synchronous
7798 * until we find a qinfo uio type specified.
7800 wrq = stp->sd_wrq->q_next;
7801 while (wrq) {
7802 if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7803 wrq = 0;
7804 break;
7806 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7807 break;
7808 if (! _SAMESTR(wrq)) {
7809 wrq = 0;
7810 break;
7812 wrq = wrq->q_next;
7814 stp->sd_struiordq = wrq ? _RD(wrq) : 0;
7818 * pass_wput unblocks the passthru queues, so that
7819 * messages can arrive at the mux's lower read queue before
7820 * I_LINK/I_UNLINK is acked/nacked.
7822 static void
7823 pass_wput(queue_t *q, mblk_t *mp)
7825 syncq_t *sq;
7827 sq = _RD(q)->q_syncq;
7828 if (sq->sq_flags & SQ_BLOCKED)
7829 unblocksq(sq, SQ_BLOCKED, 0);
7830 putnext(q, mp);
7834 * Set up queues for the link/unlink.
7835 * Create a new queue and block it and then insert it
7836 * below the stream head on the lower stream.
7837 * This prevents any messages from arriving during the setq
7838 * as well as while the mux is processing the I_LINK/I_UNLINK.
7839 * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7840 * been acked or nacked or if a message is generated and sent
7841 * down the mux's write put procedure.
7842 * See pass_wput().
7844 * After the new queue is inserted, all messages coming from below are
7845 * blocked. The call to strlock will ensure that all activity in the stream head
7846 * read queue syncq is stopped (sq_count drops to zero).
7848 static queue_t *
7849 link_addpassthru(stdata_t *stpdown)
7851 queue_t *passq;
7852 sqlist_t sqlist;
7854 passq = allocq();
7855 STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7856 /* setq might sleep in allocator - avoid holding locks. */
7857 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7858 SQ_CI|SQ_CO, B_FALSE);
7859 claimq(passq);
7860 blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7861 insertq(STREAM(passq), passq);
7864 * Use strlock() to wait for the stream head sq_count to drop to zero
7865 * since we are going to change q_ptr in the stream head. Note that
7866 * insertq() doesn't wait for any syncq counts to drop to zero.
7868 sqlist.sqlist_head = NULL;
7869 sqlist.sqlist_index = 0;
7870 sqlist.sqlist_size = sizeof (sqlist_t);
7871 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7872 strlock(stpdown, &sqlist);
7873 strunlock(stpdown, &sqlist);
7875 releaseq(passq);
7876 return (passq);
7880 * Let messages flow up into the mux by removing
7881 * the passq.
7883 static void
7884 link_rempassthru(queue_t *passq)
7886 claimq(passq);
7887 removeq(passq);
7888 releaseq(passq);
7889 freeq(passq);
7893 * Wait for the condition variable pointed to by `cvp' to be signaled,
7894 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
7895 * is negative, then there is no time limit. If `nosigs' is non-zero,
7896 * then the wait will be non-interruptible.
7898 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
7900 clock_t
7901 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
7903 clock_t ret;
7905 if (tim < 0) {
7906 if (nosigs) {
7907 cv_wait(cvp, mp);
7908 ret = 1;
7909 } else {
7910 ret = cv_wait_sig(cvp, mp);
7912 } else if (tim > 0) {
7914 * convert milliseconds to clock ticks
7916 if (nosigs) {
7917 ret = cv_reltimedwait(cvp, mp,
7918 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
7919 } else {
7920 ret = cv_reltimedwait_sig(cvp, mp,
7921 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
7923 } else {
7924 ret = -1;
7926 return (ret);
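/*
 * Illustrative sketch (user-space pthreads analogue, not the kernel
 * routine): mirrors the tim < 0 / tim > 0 / tim == 0 cases above.
 * pthread_cond_timedwait() stands in for cv_reltimedwait(); signal
 * interruption (the 0 return) is not modelled. Hypothetical names.
 */
#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Returns 1 if signaled, or -1 on timeout (including tim_ms == 0). */
static int
model_cv_wait(pthread_cond_t *cvp, pthread_mutex_t *mp, long tim_ms)
{
	struct timespec abst;

	if (tim_ms < 0) {			/* no time limit */
		pthread_cond_wait(cvp, mp);
		return (1);
	}
	if (tim_ms == 0)
		return (-1);			/* immediate timeout */
	clock_gettime(CLOCK_REALTIME, &abst);
	abst.tv_sec += tim_ms / 1000;
	abst.tv_nsec += (tim_ms % 1000) * 1000000L;
	if (abst.tv_nsec >= 1000000000L) {
		abst.tv_sec++;
		abst.tv_nsec -= 1000000000L;
	}
	return (pthread_cond_timedwait(cvp, mp, &abst) == ETIMEDOUT ? -1 : 1);
}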
7930 * Wait until the stream head can determine if it is at the mark but
7931 * don't wait forever to prevent a race condition between the "mark" state
7932 * in the stream head and any mark state in the caller/user of this routine.
7934 * This is used by sockets; for a socket it would be incorrect
7935 * to return a failure for SIOCATMARK when there is no data in the receive
7936 * queue and the marked urgent data is traveling up the stream.
7938 * This routine waits until the mark is known by waiting for one of these
7939 * three events:
7940 * The stream head read queue becoming non-empty (including an EOF).
7941 * The STRATMARK flag being set (due to a MSGMARKNEXT message).
7942 * The STRNOTATMARK flag being set (which indicates that the transport
7943 * has sent a MSGNOTMARKNEXT message to indicate that it is not at
7944 * the mark).
7946 * The routine returns 1 if the stream is at the mark; 0 if it can
7947 * be determined that the stream is not at the mark.
7948 * If the wait times out and it can't determine
7949 * whether or not the stream might be at the mark the routine will return -1.
7951 * Note: This routine should only be used when a mark is pending i.e.,
7952 * in the socket case the SIGURG has been posted.
7953 * Note2: This can not wakeup just because synchronous streams indicate
7954 * that data is available since it is not possible to use the synchronous
7955 * streams interfaces to determine the b_flag value for the data queued below
7956 * the stream head.
7958 int
7959 strwaitmark(vnode_t *vp)
7961 struct stdata *stp = vp->v_stream;
7962 queue_t *rq = _RD(stp->sd_wrq);
7963 int mark;
7965 mutex_enter(&stp->sd_lock);
7966 while (rq->q_first == NULL &&
7967 !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
7968 stp->sd_flag |= RSLEEP;
7970 /* Wait for 100 milliseconds for any state change. */
7971 if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
7972 mutex_exit(&stp->sd_lock);
7973 return (-1);
7976 if (stp->sd_flag & STRATMARK)
7977 mark = 1;
7978 else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
7979 mark = 1;
7980 else
7981 mark = 0;
7983 mutex_exit(&stp->sd_lock);
7984 return (mark);
7988 * Set a read side error. If persist is set, change the socket error
7989 * to persistent. If errfunc is set, install the function as the exported
7990 * error handler.
7992 void
7993 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
7995 struct stdata *stp = vp->v_stream;
7997 mutex_enter(&stp->sd_lock);
7998 stp->sd_rerror = error;
7999 if (error == 0 && errfunc == NULL)
8000 stp->sd_flag &= ~STRDERR;
8001 else
8002 stp->sd_flag |= STRDERR;
8003 if (persist) {
8004 stp->sd_flag &= ~STRDERRNONPERSIST;
8005 } else {
8006 stp->sd_flag |= STRDERRNONPERSIST;
8008 stp->sd_rderrfunc = errfunc;
8009 if (error != 0 || errfunc != NULL) {
8010 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */
8011 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */
8012 cv_broadcast(&stp->sd_monitor); /* ioctllers */
8014 mutex_exit(&stp->sd_lock);
8015 pollwakeup(&stp->sd_pollist, POLLERR);
8016 mutex_enter(&stp->sd_lock);
8018 if (stp->sd_sigflags & S_ERROR)
8019 strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8021 mutex_exit(&stp->sd_lock);
8025 * Set a write side error. If persist is set, change the socket error
8026 * to persistent.
8028 void
8029 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
8031 struct stdata *stp = vp->v_stream;
8033 mutex_enter(&stp->sd_lock);
8034 stp->sd_werror = error;
8035 if (error == 0 && errfunc == NULL)
8036 stp->sd_flag &= ~STWRERR;
8037 else
8038 stp->sd_flag |= STWRERR;
8039 if (persist) {
8040 stp->sd_flag &= ~STWRERRNONPERSIST;
8041 } else {
8042 stp->sd_flag |= STWRERRNONPERSIST;
8044 stp->sd_wrerrfunc = errfunc;
8045 if (error != 0 || errfunc != NULL) {
8046 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */
8047 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */
8048 cv_broadcast(&stp->sd_monitor); /* ioctllers */
8050 mutex_exit(&stp->sd_lock);
8051 pollwakeup(&stp->sd_pollist, POLLERR);
8052 mutex_enter(&stp->sd_lock);
8054 if (stp->sd_sigflags & S_ERROR)
8055 strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8057 mutex_exit(&stp->sd_lock);
8061 * Make the stream return 0 (EOF) when all data has been read.
8062 * No effect on write side.
8064 void
8065 strseteof(vnode_t *vp, int eof)
8067 struct stdata *stp = vp->v_stream;
8069 mutex_enter(&stp->sd_lock);
8070 if (!eof) {
8071 stp->sd_flag &= ~STREOF;
8072 mutex_exit(&stp->sd_lock);
8073 return;
8075 stp->sd_flag |= STREOF;
8076 if (stp->sd_flag & RSLEEP) {
8077 stp->sd_flag &= ~RSLEEP;
8078 cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
8081 mutex_exit(&stp->sd_lock);
8082 pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
8083 mutex_enter(&stp->sd_lock);
8085 if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
8086 strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
8087 mutex_exit(&stp->sd_lock);
8090 void
8091 strflushrq(vnode_t *vp, int flag)
8093 struct stdata *stp = vp->v_stream;
8095 mutex_enter(&stp->sd_lock);
8096 flushq(_RD(stp->sd_wrq), flag);
8097 mutex_exit(&stp->sd_lock);
8100 void
8101 strsetrputhooks(vnode_t *vp, uint_t flags,
8102 msgfunc_t protofunc, msgfunc_t miscfunc)
8104 struct stdata *stp = vp->v_stream;
8106 mutex_enter(&stp->sd_lock);
8108 if (protofunc == NULL)
8109 stp->sd_rprotofunc = strrput_proto;
8110 else
8111 stp->sd_rprotofunc = protofunc;
8113 if (miscfunc == NULL)
8114 stp->sd_rmiscfunc = strrput_misc;
8115 else
8116 stp->sd_rmiscfunc = miscfunc;
8118 if (flags & SH_CONSOL_DATA)
8119 stp->sd_rput_opt |= SR_CONSOL_DATA;
8120 else
8121 stp->sd_rput_opt &= ~SR_CONSOL_DATA;
8123 if (flags & SH_SIGALLDATA)
8124 stp->sd_rput_opt |= SR_SIGALLDATA;
8125 else
8126 stp->sd_rput_opt &= ~SR_SIGALLDATA;
8128 if (flags & SH_IGN_ZEROLEN)
8129 stp->sd_rput_opt |= SR_IGN_ZEROLEN;
8130 else
8131 stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;
8133 mutex_exit(&stp->sd_lock);
8136 void
8137 strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
8139 struct stdata *stp = vp->v_stream;
8141 mutex_enter(&stp->sd_lock);
8142 stp->sd_closetime = closetime;
8144 if (flags & SH_SIGPIPE)
8145 stp->sd_wput_opt |= SW_SIGPIPE;
8146 else
8147 stp->sd_wput_opt &= ~SW_SIGPIPE;
8148 if (flags & SH_RECHECK_ERR)
8149 stp->sd_wput_opt |= SW_RECHECK_ERR;
8150 else
8151 stp->sd_wput_opt &= ~SW_RECHECK_ERR;
8153 mutex_exit(&stp->sd_lock);
8156 void
8157 strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
8159 struct stdata *stp = vp->v_stream;
8161 mutex_enter(&stp->sd_lock);
8163 stp->sd_rputdatafunc = rdatafunc;
8164 stp->sd_wputdatafunc = wdatafunc;
8166 mutex_exit(&stp->sd_lock);
/* Used within framework when the queue is already locked */
void
qenable_locked(queue_t *q)
{
	stdata_t *stp = STREAM(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));

	if (!q->q_qinfo->qi_srvp)
		return;

	/*
	 * Do not place on run queue if already enabled or closing.
	 */
	if (q->q_flag & (QWCLOSE|QENAB))
		return;

	/*
	 * Mark the queue enabled and place it on the run list if it is
	 * not already being serviced. If it is being serviced, the
	 * runservice() function will detect that QENAB is set and call
	 * the service procedure before clearing the QINSERVICE flag.
	 */
	q->q_flag |= QENAB;
	if (q->q_flag & QINSERVICE)
		return;

	/* Record the time of qenable */
	q->q_qtstamp = ddi_get_lbolt();

	/*
	 * Put the queue in the stp list and schedule it for background
	 * processing if it is not already scheduled or if the stream head
	 * does not intend to process it in the foreground later by setting
	 * the STRS_WILLSERVICE flag.
	 */
	mutex_enter(&stp->sd_qlock);
	/*
	 * If there is already something on the list, the stp flags should
	 * show the intention to drain it.
	 */
	IMPLY(STREAM_NEEDSERVICE(stp),
	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
	stp->sd_nqueues++;

	/*
	 * If no one will drain this stream we are the first producer and
	 * need to schedule it for the background thread.
	 */
	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
		/*
		 * No one will service this stream later, so we have to
		 * schedule it now.
		 */
		STRSTAT(stenables);
		stp->sd_svcflags |= STRS_SCHEDULED;
		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);

		if (stp->sd_servid == NULL) {
			/*
			 * The task queue dispatch failed, so fail over to
			 * the backup servicing thread.
			 */
			STRSTAT(taskqfails);
			/*
			 * It is safe to clear the STRS_SCHEDULED flag
			 * because it was set by this thread above.
			 */
			stp->sd_svcflags &= ~STRS_SCHEDULED;

			/*
			 * Failover scheduling is protected by the
			 * service_queue lock.
			 */
			mutex_enter(&service_queue);
			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
			ASSERT(q->q_link == NULL);
			/*
			 * Append the queue to the qhead/qtail list.
			 */
			if (qhead == NULL)
				qhead = q;
			else
				qtail->q_link = q;
			qtail = q;
			/*
			 * Clear the stp queue list.
			 */
			stp->sd_qhead = stp->sd_qtail = NULL;
			stp->sd_nqueues = 0;
			/*
			 * Wake up the background queue processing thread.
			 */
			cv_signal(&services_to_run);
			mutex_exit(&service_queue);
		}
	}
	mutex_exit(&stp->sd_qlock);
}

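/*
 * Scheduling sketch for the function above (an informal summary derived
 * from the code, not original commentary). With QLOCK held, the common
 * path is:
 *
 *	q->q_flag |= QENAB;              queue marked runnable
 *	ENQUEUE on stp->sd_qhead/sd_qtail
 *	taskq_dispatch(streams_taskq, stream_service, stp,
 *	    TQ_NOSLEEP|TQ_NOQUEUE);
 *
 * Only when that dispatch returns NULL does the queue migrate to the
 * global qhead/qtail list, which the backup servicing thread drains
 * after being woken via cv_signal(&services_to_run).
 */
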
static void
queue_service(queue_t *q)
{
	/*
	 * The queue in the list should have the QENAB flag set and
	 * should not have the QINSERVICE flag set. QINSERVICE is set
	 * when the queue is dequeued, and qenable_locked doesn't
	 * enqueue a queue with QINSERVICE set.
	 */

	ASSERT(!(q->q_flag & QINSERVICE));
	ASSERT((q->q_flag & QENAB));
	mutex_enter(QLOCK(q));
	q->q_flag &= ~QENAB;
	q->q_flag |= QINSERVICE;
	mutex_exit(QLOCK(q));
	runservice(q);
}

static void
syncq_service(syncq_t *sq)
{
	STRSTAT(syncqservice);
	mutex_enter(SQLOCK(sq));
	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
	ASSERT(sq->sq_servcount != 0);
	ASSERT(sq->sq_next == NULL);

	/* if we came here from the background thread, clear the flag */
	if (sq->sq_svcflags & SQ_BGTHREAD)
		sq->sq_svcflags &= ~SQ_BGTHREAD;

	/* let drain_syncq know that it's being called in the background */
	sq->sq_svcflags |= SQ_SERVICE;
	drain_syncq(sq);
}

static void
qwriter_outer_service(syncq_t *outer)
{
	/*
	 * Note that SQ_WRITER is used on the outer perimeter
	 * to signal that a qwriter(OUTER) is either preparing to
	 * run or is actually running a function.
	 */
	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

	/*
	 * All inner syncqs are empty and have SQ_WRITER set
	 * to block entering the outer perimeter.
	 *
	 * We do not need to explicitly call write_now since
	 * outer_exit does it for us.
	 */
	outer_exit(outer);
}

static void
mblk_free(mblk_t *mp)
{
	dblk_t *dbp = mp->b_datap;
	frtn_t *frp = dbp->db_frtnp;

	mp->b_next = NULL;
	if (dbp->db_fthdr != NULL)
		str_ftfree(dbp);

	ASSERT(dbp->db_fthdr == NULL);
	frp->free_func(frp->free_arg);
	ASSERT(dbp->db_mblk == mp);

	if (dbp->db_credp != NULL) {
		crfree(dbp->db_credp);
		dbp->db_credp = NULL;
	}
	dbp->db_cpid = -1;
	dbp->db_struioflag = 0;
	dbp->db_struioun.cksum.flags = 0;

	kmem_cache_free(dbp->db_cache, dbp);
}

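/*
 * Context note (an inference from the code above, not original
 * commentary): this is the deferred destructor for message blocks
 * whose data buffer was externally supplied, e.g. via esballoc(9F).
 * It is reached from taskq context when the registered frtn_t free
 * routine cannot safely run in the caller's context (cf. the freebs
 * kstat), which is why free_func runs before the dblk is returned to
 * its kmem cache.
 */
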
/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_SCHEDULED;
	stp->sd_servid = NULL;
	cv_signal(&stp->sd_qcv);
	mutex_exit(&stp->sd_qlock);
}

/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);
	STRSTAT(rservice);
	/*
	 * We are going to drain this stream queue list, so qenable_locked
	 * will not schedule it until we finish.
	 */
	stp->sd_svcflags |= STRS_WILLSERVICE;

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
	/*
	 * Help the backup background thread drain the qhead/qtail list.
	 */
	while (qhead != NULL) {
		STRSTAT(qhelps);
		mutex_enter(&service_queue);
		DQ(q, qhead, qtail, q_link);
		mutex_exit(&service_queue);
		if (q != NULL)
			queue_service(q);
	}
}

void
stream_willservice(stdata_t *stp)
{
	mutex_enter(&stp->sd_qlock);
	stp->sd_svcflags |= STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
}

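/*
 * Usage sketch (inferred from the STRS_WILLSERVICE handling above,
 * shown here for clarity): the stream head brackets foreground
 * processing like this:
 *
 *	stream_willservice(stp);        announce foreground intent
 *	...put messages; qenable_locked() queues work on stp...
 *	stream_runservice(stp);         drain the list ourselves
 *
 * Between the two calls qenable_locked() sees STRS_WILLSERVICE and
 * refrains from dispatching the stream to streams_taskq.
 */
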
/*
 * Replace the cred currently in the mblk with a different one.
 * Also update db_cpid.
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
{
	dblk_t *dbp = mp->b_datap;
	cred_t *ocr = dbp->db_credp;

	ASSERT(cr != NULL);

	if (cr != ocr) {
		crhold(dbp->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
	/* Don't overwrite with NOPID */
	if (cpid != NOPID)
		dbp->db_cpid = cpid;
}

/*
 * If the src message has a cred, then replace the cred currently in the
 * mblk with it.
 * Also update db_cpid.
 */
void
mblk_copycred(mblk_t *mp, const mblk_t *src)
{
	dblk_t *dbp = mp->b_datap;
	cred_t *cr, *ocr;
	pid_t cpid;

	cr = msg_getcred(src, &cpid);
	if (cr == NULL)
		return;

	ocr = dbp->db_credp;
	if (cr != ocr) {
		crhold(dbp->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
	/* Don't overwrite with NOPID */
	if (cpid != NOPID)
		dbp->db_cpid = cpid;
}

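/*
 * Illustrative sketch (an assumption for clarity, not from this file):
 * a module that builds a new message on behalf of an existing one can
 * carry the originator's credentials across:
 *
 *	nmp = allocb(len, BPRI_MED);
 *	if (nmp != NULL)
 *		mblk_copycred(nmp, mp);
 *
 * If mp carries no cred, nmp is left untouched, as the early return
 * above shows.
 */
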
int
hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
	int rc = 0;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		/* Associate values for M_DATA type */
		DB_CKSUMSTART(mp) = (intptr_t)start;
		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
		DB_CKSUMEND(mp) = (intptr_t)end;
		DB_CKSUMFLAGS(mp) = flags;
		DB_CKSUM16(mp) = (uint16_t)value;
	} else {
		pattrinfo_t pa_info;

		ASSERT(mmd != NULL);

		pa_info.type = PATTR_HCKSUM;
		pa_info.len = sizeof (pattr_hcksum_t);

		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

			hck->hcksum_start_offset = start;
			hck->hcksum_stuff_offset = stuff;
			hck->hcksum_end_offset = end;
			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
			hck->hcksum_flags = flags;
		} else {
			rc = -1;
		}
	}
	return (rc);
}

void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS;
			if ((*flags & (HCK_PARTIALCKSUM |
			    HCK_FULLCKSUM)) != 0) {
				if (value != NULL)
					*value = (uint32_t)DB_CKSUM16(mp);
				if ((*flags & HCK_PARTIALCKSUM) != 0) {
					if (start != NULL)
						*start =
						    (uint32_t)DB_CKSUMSTART(mp);
					if (stuff != NULL)
						*stuff =
						    (uint32_t)DB_CKSUMSTUFF(mp);
					if (end != NULL)
						*end =
						    (uint32_t)DB_CKSUMEND(mp);
				}
			}
		}
	} else {
		pattrinfo_t hck_attr = {PATTR_HCKSUM};

		ASSERT(mmd != NULL);

		/* get hardware checksum attribute */
		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
			if (flags != NULL)
				*flags = hck->hcksum_flags;
			if (start != NULL)
				*start = hck->hcksum_start_offset;
			if (stuff != NULL)
				*stuff = hck->hcksum_stuff_offset;
			if (end != NULL)
				*end = hck->hcksum_end_offset;
			if (value != NULL)
				*value = (uint32_t)
				    hck->hcksum_cksum_val.inet_cksum;
		}
	}
}

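/*
 * Usage sketch for the pair above (hedged: the offset variables are
 * made-up placeholders): a driver advertising partial checksum offload
 * might tag an outbound M_DATA packet and later read the values back:
 *
 *	(void) hcksum_assoc(mp, NULL, NULL, start_off, stuff_off,
 *	    end_off, 0, HCK_PARTIALCKSUM, KM_NOSLEEP);
 *	...
 *	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, &end,
 *	    &value, &flags);
 *
 * For M_DATA messages the mmd/pd arguments are unused and may be NULL;
 * they matter only on the M_MULTIDATA path.
 */
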
void
lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);
	ASSERT((flags & ~HW_LSO_FLAGS) == 0);

	/* Set the flags */
	DB_LSOFLAGS(mp) |= flags;
	DB_LSOMSS(mp) = mss;
}

void
lso_info_cleanup(mblk_t *mp)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	/* Clear the flags */
	DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS;
	DB_LSOMSS(mp) = 0;
}

/*
 * Checksum the buffer *bp for len bytes, starting from the partial
 * checksum psum (or 0 if none), and return the new 16-bit partial
 * checksum.
 */
unsigned
bcksum(uchar_t *bp, int len, unsigned int psum)
{
	int odd = len & 1;
	extern unsigned int ip_ocsum();

	if (((intptr_t)bp & 1) == 0 && !odd) {
		/*
		 * Bp is 16-bit aligned and len is a multiple of
		 * 16-bit words.
		 */
		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
	}
	if (((intptr_t)bp & 1) != 0) {
		/*
		 * Bp isn't 16-bit aligned.
		 */
		unsigned int tsum;

#ifdef _LITTLE_ENDIAN
		psum += *bp;
#else
		psum += *bp << 8;
#endif
		len--;
		bp++;
		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
		if (len & 1) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp << 8;
#else
			psum += *bp;
#endif
		}
	} else {
		/*
		 * Bp is 16-bit aligned.
		 */
		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
		if (odd) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp;
#else
			psum += *bp << 8;
#endif
		}
	}
	/*
	 * Normalize psum to 16 bits before returning the new partial
	 * checksum. The max psum value before normalization is 0x3FDFE.
	 */
	return ((psum >> 16) + (psum & 0xFFFF));
}

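/*
 * Worked example of the final fold above (editorial arithmetic only,
 * using the documented maximum accumulator value):
 *
 *	psum = 0x3FDFE:  (psum >> 16) + (psum & 0xFFFF)
 *	               = 0x3 + 0xFDFE = 0xFE01
 *
 * The result is again a partial one's-complement sum; callers that
 * need a final Internet checksum fold any remaining carry and
 * complement the result themselves.
 */
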
boolean_t
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
	boolean_t rc;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (DB_TYPE(mp) == M_DATA) {
		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
	} else {
		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

		ASSERT(mmd != NULL);
		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
	}
	return (rc);
}

void
freemsgchain(mblk_t *mp)
{
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		freemsg(mp);
		mp = next;
	}
}

mblk_t *
copymsgchain(mblk_t *mp)
{
	mblk_t *nmp = NULL;
	mblk_t **nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = copymsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}

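/*
 * Usage sketch (an illustrative assumption): chains linked via b_next
 * are duplicated all-or-nothing, so a caller can simply do
 *
 *	mblk_t *dup = copymsgchain(chain);
 *	if (dup == NULL)
 *		return (ENOMEM);        hypothetical error path
 *
 * without worrying about partially copied chains: on failure, every
 * message copied so far has already been freed via freemsgchain().
 */
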
/* NOTE: Do not add code after this point. */
#undef QLOCK

/*
 * Replacement for the QLOCK macro for those that can't use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
	return (&(q)->q_lock);
}

/*
 * Dummy runqueues/queuerun functions for backwards compatibility.
 */
#undef runqueues
void
runqueues(void)
{
}

#undef queuerun
void
queuerun(void)
{
}

/*
 * Initialize the STR stack instance, which tracks autopush and persistent
 * links.
 */
/* ARGSUSED */
static void *
str_stack_init(netstackid_t stackid, netstack_t *ns)
{
	str_stack_t *ss;
	int i;

	ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
	ss->ss_netstack = ns;

	/*
	 * Set up autopush.
	 */
	sad_initspace(ss);

	/*
	 * Set up mux_node structures.
	 */
	ss->ss_devcnt = devcnt;	/* In case it should change before free */
	ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
	    ss->ss_devcnt), KM_SLEEP);
	for (i = 0; i < ss->ss_devcnt; i++)
		ss->ss_mux_nodes[i].mn_imaj = i;
	return (ss);
}

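/*
 * Registration sketch (hedged: the registration call lives elsewhere in
 * the STREAMS framework, shown here only to connect the three netstack
 * callbacks defined in this file):
 *
 *	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
 *	    str_stack_fini);
 *
 * The netstack framework then runs str_stack_init for each stack
 * instance, str_stack_shutdown at zone shutdown, and str_stack_fini
 * at destroy.
 */
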
/*
 * Note: run at zone shutdown and not destroy so that the PLINKs are
 * gone by the time other cleanup happens from the destroy callbacks.
 */
static void
str_stack_shutdown(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;
	int i;
	cred_t *cr;

	cr = zone_get_kcred(netstackid_to_zoneid(stackid));
	ASSERT(cr != NULL);

	/* Undo all the I_PLINKs for this zone */
	for (i = 0; i < ss->ss_devcnt; i++) {
		struct mux_edge *ep;
		ldi_handle_t lh;
		ldi_ident_t li;
		int ret;
		int rval;
		dev_t rdev;

		ep = ss->ss_mux_nodes[i].mn_outp;
		if (ep == NULL)
			continue;
		ret = ldi_ident_from_major((major_t)i, &li);
		if (ret != 0) {
			continue;
		}
		rdev = ep->me_dev;
		ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
		    cr, &lh, li);
		if (ret != 0) {
			ldi_ident_release(li);
			continue;
		}

		ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
		    cr, &rval);
		if (ret) {
			(void) ldi_close(lh, FREAD|FWRITE, cr);
			ldi_ident_release(li);
			continue;
		}
		(void) ldi_close(lh, FREAD|FWRITE, cr);

		/* Close layered handles */
		ldi_ident_release(li);
	}
	crfree(cr);

	sad_freespace(ss);

	kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
	ss->ss_mux_nodes = NULL;
}

/*
 * Free the structure; str_stack_shutdown did the other cleanup work.
 */
/* ARGSUSED */
static void
str_stack_fini(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;

	kmem_free(ss, sizeof (*ss));
}