/* kernel/os/putnext.c (unleashed.git, branch 'black_magic') */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * UNIX Device Driver Interface functions
 *	This file contains the C-versions of putnext() and put().
 *	Assembly language versions exist for some architectures.
 */
36 #include <sys/types.h>
37 #include <sys/systm.h>
38 #include <sys/cpuvar.h>
39 #include <sys/debug.h>
40 #include <sys/t_lock.h>
41 #include <sys/stream.h>
42 #include <sys/thread.h>
43 #include <sys/strsubr.h>
44 #include <sys/ddi.h>
45 #include <sys/vtrace.h>
46 #include <sys/cmn_err.h>
47 #include <sys/strft.h>
48 #include <sys/stack.h>
49 #include <sys/archsystm.h>
/*
 * Streams with many modules may create long chains of calls via putnext() which
 * may exhaust stack space. When putnext detects that the stack space left is
 * too small (less than PUT_STACK_NEEDED), the call chain is broken and
 * further processing is delegated to the background thread via a call to
 * putnext_tail(). Unfortunately there is no generic solution with a fixed
 * stack size, and putnext() is a recursive function, so this hack is a
 * necessary evil.
 *
 * The redzone value is chosen dependent on the default stack size, which is 8K
 * on 32-bit kernels and on x86 and 16K on 64-bit kernels. The values are chosen
 * empirically. For 64-bit kernels it is 5000 and for 32-bit kernels it is 3000.
 * Experiments showed that 2500 is not enough for either 32-bit or 64-bit
 * kernels.
 *
 * The redzone value is a tuneable rather than a constant to allow adjustments
 * in the field.
 *
 * The check in PUT_STACK_NOTENOUGH is taken from the segkp_map_red() function.
 * It is possible to define it as a generic function exported by seg_kp, but
 *
 * a) It may sound like an open invitation to use the facility indiscriminately.
 * b) It adds an extra function call in the putnext path.
 *
 * We keep a global counter `put_stack_notenough' which keeps track of how many
 * times the stack switching hack was used.
 */
/* Diagnostic counter: how often the stack-depth hack was triggered. */
static ulong_t put_stack_notenough;

/* Redzone size (bytes), chosen empirically - see the comment above. */
#ifdef _LP64
#define	PUT_STACK_NEEDED	5000
#else
#define	PUT_STACK_NEEDED	3000
#endif

/* Tuneable copy of PUT_STACK_NEEDED so it can be adjusted in the field. */
int put_stack_needed = PUT_STACK_NEEDED;

#if defined(STACK_GROWTH_DOWN)
/*
 * Non-zero when the space between the current frame pointer and the thread's
 * stack base falls below put_stack_needed; bumps put_stack_notenough as a
 * side effect when it fires.
 */
#define	PUT_STACK_NOTENOUGH() \
	(((STACK_BIAS + (uintptr_t)getfp() - \
	    (uintptr_t)curthread->t_stkbase) < put_stack_needed) && \
	    ++put_stack_notenough)
#else
#error "STACK_GROWTH_DOWN undefined"
#endif

/* When B_TRUE, put() also uses the per-CPU putlocks fast path. */
boolean_t UseFastlocks = B_FALSE;
/*
 * function: putnext()
 * purpose:  call the put routine of the queue linked to qp
 *
 * Note: this function is written to perform well on modern computer
 * architectures by e.g. preloading values into registers and "smearing" out
 * code.
 *
 * A note on the fastput mechanism.  The most significant bit of a
 * putcount is considered the "FASTPUT" bit.  If set, then there is
 * nothing stopping a concurrent put from occurring (note that putcounts
 * are only allowed on CIPUT perimeters).  If, however, it is cleared,
 * then we need to take the normal lock path by acquiring the SQLOCK.
 * This is a slowlock.  When a thread starts exclusiveness, e.g. wants
 * writer access, it will clear the FASTPUT bit, causing new threads
 * to take the slowlock path.  This assures that putcounts will not
 * increase in value, so the want-writer does not need to constantly
 * acquire the putlocks to sum the putcounts.  This does have the
 * possibility of having the count drop right after reading, but that
 * is no different than acquiring, reading and then releasing.  However,
 * in this mode, it cannot go up, so eventually they will drop to zero
 * and the want-writer can proceed.
 *
 * If the FASTPUT bit is set, or in the slowlock path we see that there
 * are no writers or want-writers, we make the choice of calling the
 * putproc, or a "fast-fill_syncq".  The fast-fill is a fill with
 * immediate intention to drain.  This is done because there are
 * messages already at the queue waiting to drain.  To preserve message
 * ordering, we need to put this message at the end, and pickup the
 * messages at the beginning.  We call the macro that actually
 * enqueues the message on the queue, and then call qdrain_syncq.  If
 * there is already a drainer, we just return.  We could make that
 * check before calling qdrain_syncq, but it is a little more clear
 * to have qdrain_syncq do this (we might try the above optimization
 * as this behavior evolves).  qdrain_syncq assumes that SQ_EXCL is set
 * already if this is a non-CIPUT perimeter, and that an appropriate
 * claim has been made.  So we do all that work before dropping the
 * SQLOCK with our claim.
 *
 * If we cannot proceed with the putproc/fast-fill, we just fall
 * through to the qfill_syncq, and then tail processing.  If state
 * has changed in that cycle, or wakeups are needed, it will occur
 * there.
 */
143 void
144 putnext(queue_t *qp, mblk_t *mp)
146 queue_t *fqp = qp; /* For strft tracing */
147 syncq_t *sq;
148 uint16_t flags;
149 uint16_t drain_mask;
150 struct qinit *qi;
151 int (*putproc)();
152 struct stdata *stp;
153 int ix;
154 boolean_t queued = B_FALSE;
155 kmutex_t *sdlock = NULL;
156 kmutex_t *sqciplock = NULL;
157 ushort_t *sqcipcount = NULL;
159 TRACE_2(TR_FAC_STREAMS_FR, TR_PUTNEXT_START,
160 "putnext_start:(%p, %p)", qp, mp);
162 ASSERT(mp->b_datap->db_ref != 0);
163 ASSERT(mp->b_next == NULL && mp->b_prev == NULL);
164 stp = STREAM(qp);
165 ASSERT(stp != NULL);
166 if (stp->sd_ciputctrl != NULL) {
167 ix = CPU->cpu_seqid & stp->sd_nciputctrl;
168 sdlock = &stp->sd_ciputctrl[ix].ciputctrl_lock;
169 mutex_enter(sdlock);
170 } else {
171 mutex_enter(sdlock = &stp->sd_lock);
173 qp = qp->q_next;
174 sq = qp->q_syncq;
175 ASSERT(sq != NULL);
176 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
177 qi = qp->q_qinfo;
179 if (sq->sq_ciputctrl != NULL) {
180 /* fastlock: */
181 ASSERT(sq->sq_flags & SQ_CIPUT);
182 ix = CPU->cpu_seqid & sq->sq_nciputctrl;
183 sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
184 sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
185 mutex_enter(sqciplock);
186 if (!((*sqcipcount) & SQ_FASTPUT) ||
187 (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
188 mutex_exit(sqciplock);
189 sqciplock = NULL;
190 goto slowlock;
192 mutex_exit(sdlock);
193 (*sqcipcount)++;
194 ASSERT(*sqcipcount != 0);
195 queued = qp->q_sqflags & Q_SQQUEUED;
196 mutex_exit(sqciplock);
197 } else {
198 slowlock:
199 ASSERT(sqciplock == NULL);
200 mutex_enter(SQLOCK(sq));
201 mutex_exit(sdlock);
202 flags = sq->sq_flags;
204 * We are going to drop SQLOCK, so make a claim to prevent syncq
205 * from closing.
207 sq->sq_count++;
208 ASSERT(sq->sq_count != 0); /* Wraparound */
210 * If there are writers or exclusive waiters, there is not much
211 * we can do. Place the message on the syncq and schedule a
212 * background thread to drain it.
214 * Also if we are approaching end of stack, fill the syncq and
215 * switch processing to a background thread - see comments on
216 * top.
218 if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
219 (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {
221 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
222 "putnext_end:(%p, %p, %p) SQ_EXCL fill",
223 qp, mp, sq);
226 * NOTE: qfill_syncq will need QLOCK. It is safe to drop
227 * SQLOCK because positive sq_count keeps the syncq from
228 * closing.
230 mutex_exit(SQLOCK(sq));
232 qfill_syncq(sq, qp, mp);
234 * NOTE: after the call to qfill_syncq() qp may be
235 * closed, both qp and sq should not be referenced at
236 * this point.
238 * This ASSERT is located here to prevent stack frame
239 * consumption in the DEBUG code.
241 ASSERT(sqciplock == NULL);
242 return;
245 queued = qp->q_sqflags & Q_SQQUEUED;
247 * If not a concurrent perimiter, we need to acquire
248 * it exclusively. It could not have been previously
249 * set since we held the SQLOCK before testing
250 * SQ_GOAWAY above (which includes SQ_EXCL).
251 * We do this here because we hold the SQLOCK, and need
252 * to make this state change BEFORE dropping it.
254 if (!(flags & SQ_CIPUT)) {
255 ASSERT((sq->sq_flags & SQ_EXCL) == 0);
256 ASSERT(!(sq->sq_type & SQ_CIPUT));
257 sq->sq_flags |= SQ_EXCL;
259 mutex_exit(SQLOCK(sq));
262 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
263 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
266 * We now have a claim on the syncq, we are either going to
267 * put the message on the syncq and then drain it, or we are
268 * going to call the putproc().
270 putproc = qi->qi_putp;
271 if (!queued) {
272 STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
273 mp->b_datap->db_base);
274 (*putproc)(qp, mp);
275 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
276 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
277 } else {
278 mutex_enter(QLOCK(qp));
280 * If there are no messages in front of us, just call putproc(),
281 * otherwise enqueue the message and drain the queue.
283 if (qp->q_syncqmsgs == 0) {
284 mutex_exit(QLOCK(qp));
285 STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
286 mp->b_datap->db_base);
287 (*putproc)(qp, mp);
288 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
289 } else {
291 * We are doing a fill with the intent to
292 * drain (meaning we are filling because
293 * there are messages in front of us ane we
294 * need to preserve message ordering)
295 * Therefore, put the message on the queue
296 * and call qdrain_syncq (must be done with
297 * the QLOCK held).
299 STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
300 mp->b_rptr - mp->b_datap->db_base);
302 #ifdef DEBUG
304 * These two values were in the original code for
305 * all syncq messages. This is unnecessary in
306 * the current implementation, but was retained
307 * in debug mode as it is usefull to know where
308 * problems occur.
310 mp->b_queue = qp;
311 mp->b_prev = (mblk_t *)putproc;
312 #endif
313 SQPUT_MP(qp, mp);
314 qdrain_syncq(sq, qp);
315 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
319 * Before we release our claim, we need to see if any
320 * events were posted. If the syncq is SQ_EXCL && SQ_QUEUED,
321 * we were responsible for going exclusive and, therefore,
322 * are resposible for draining.
324 if (sq->sq_flags & (SQ_EXCL)) {
325 drain_mask = 0;
326 } else {
327 drain_mask = SQ_QUEUED;
330 if (sqciplock != NULL) {
331 mutex_enter(sqciplock);
332 flags = sq->sq_flags;
333 ASSERT(flags & SQ_CIPUT);
334 /* SQ_EXCL could have been set by qwriter_inner */
335 if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
337 * we need SQLOCK to handle
338 * wakeups/drains/flags change. sqciplock
339 * is needed to decrement sqcipcount.
340 * SQLOCK has to be grabbed before sqciplock
341 * for lock ordering purposes.
342 * after sqcipcount is decremented some lock
343 * still needs to be held to make sure
344 * syncq won't get freed on us.
346 * To prevent deadlocks we try to grab SQLOCK and if it
347 * is held already we drop sqciplock, acquire SQLOCK and
348 * reacqwire sqciplock again.
350 if (mutex_tryenter(SQLOCK(sq)) == 0) {
351 mutex_exit(sqciplock);
352 mutex_enter(SQLOCK(sq));
353 mutex_enter(sqciplock);
355 flags = sq->sq_flags;
356 ASSERT(*sqcipcount != 0);
357 (*sqcipcount)--;
358 mutex_exit(sqciplock);
359 } else {
360 ASSERT(*sqcipcount != 0);
361 (*sqcipcount)--;
362 mutex_exit(sqciplock);
363 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
364 "putnext_end:(%p, %p, %p) done", qp, mp, sq);
365 return;
367 } else {
368 mutex_enter(SQLOCK(sq));
369 flags = sq->sq_flags;
370 ASSERT(sq->sq_count != 0);
371 sq->sq_count--;
373 if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
374 putnext_tail(sq, qp, (flags & ~drain_mask));
376 * The only purpose of this ASSERT is to preserve calling stack
377 * in DEBUG kernel.
379 ASSERT(sq != NULL);
380 return;
382 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
383 ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
385 * Safe to always drop SQ_EXCL:
386 * Not SQ_CIPUT means we set SQ_EXCL above
387 * For SQ_CIPUT SQ_EXCL will only be set if the put
388 * procedure did a qwriter(INNER) in which case
389 * nobody else is in the inner perimeter and we
390 * are exiting.
392 * I would like to make the following assertion:
394 * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
395 * sq->sq_count == 0);
397 * which indicates that if we are both putshared and exclusive,
398 * we became exclusive while executing the putproc, and the only
399 * claim on the syncq was the one we dropped a few lines above.
400 * But other threads that enter putnext while the syncq is exclusive
401 * need to make a claim as they may need to drop SQLOCK in the
402 * has_writers case to avoid deadlocks. If these threads are
403 * delayed or preempted, it is possible that the writer thread can
404 * find out that there are other claims making the (sq_count == 0)
405 * test invalid.
408 sq->sq_flags = flags & ~SQ_EXCL;
409 mutex_exit(SQLOCK(sq));
410 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
411 "putnext_end:(%p, %p, %p) done", qp, mp, sq);
/*
 * wrapper for qi_putp entry in module ops vec.
 * implements asynchronous putnext().
 * Note, that unlike putnext(), this routine is NOT optimized for the
 * fastpath.  Calling this routine will grab whatever locks are necessary
 * to protect the stream head, q_next, and syncq's.
 * And since it is in the normal locks path, we do not use putlocks if
 * they exist (though this can be changed by swapping the value of
 * UseFastlocks).
 */
425 void
426 put(queue_t *qp, mblk_t *mp)
428 queue_t *fqp = qp; /* For strft tracing */
429 syncq_t *sq;
430 uint16_t flags;
431 uint16_t drain_mask;
432 struct qinit *qi;
433 int (*putproc)();
434 int ix;
435 boolean_t queued = B_FALSE;
436 kmutex_t *sqciplock = NULL;
437 ushort_t *sqcipcount = NULL;
439 TRACE_2(TR_FAC_STREAMS_FR, TR_PUT_START,
440 "put:(%X, %X)", qp, mp);
441 ASSERT(mp->b_datap->db_ref != 0);
442 ASSERT(mp->b_next == NULL && mp->b_prev == NULL);
444 sq = qp->q_syncq;
445 ASSERT(sq != NULL);
446 qi = qp->q_qinfo;
448 if (UseFastlocks && sq->sq_ciputctrl != NULL) {
449 /* fastlock: */
450 ASSERT(sq->sq_flags & SQ_CIPUT);
451 ix = CPU->cpu_seqid & sq->sq_nciputctrl;
452 sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
453 sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
454 mutex_enter(sqciplock);
455 if (!((*sqcipcount) & SQ_FASTPUT) ||
456 (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
457 mutex_exit(sqciplock);
458 sqciplock = NULL;
459 goto slowlock;
461 (*sqcipcount)++;
462 ASSERT(*sqcipcount != 0);
463 queued = qp->q_sqflags & Q_SQQUEUED;
464 mutex_exit(sqciplock);
465 } else {
466 slowlock:
467 ASSERT(sqciplock == NULL);
468 mutex_enter(SQLOCK(sq));
469 flags = sq->sq_flags;
471 * We are going to drop SQLOCK, so make a claim to prevent syncq
472 * from closing.
474 sq->sq_count++;
475 ASSERT(sq->sq_count != 0); /* Wraparound */
477 * If there are writers or exclusive waiters, there is not much
478 * we can do. Place the message on the syncq and schedule a
479 * background thread to drain it.
481 * Also if we are approaching end of stack, fill the syncq and
482 * switch processing to a background thread - see comments on
483 * top.
485 if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
486 (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {
488 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
489 "putnext_end:(%p, %p, %p) SQ_EXCL fill",
490 qp, mp, sq);
493 * NOTE: qfill_syncq will need QLOCK. It is safe to drop
494 * SQLOCK because positive sq_count keeps the syncq from
495 * closing.
497 mutex_exit(SQLOCK(sq));
499 qfill_syncq(sq, qp, mp);
501 * NOTE: after the call to qfill_syncq() qp may be
502 * closed, both qp and sq should not be referenced at
503 * this point.
505 * This ASSERT is located here to prevent stack frame
506 * consumption in the DEBUG code.
508 ASSERT(sqciplock == NULL);
509 return;
512 queued = qp->q_sqflags & Q_SQQUEUED;
514 * If not a concurrent perimiter, we need to acquire
515 * it exclusively. It could not have been previously
516 * set since we held the SQLOCK before testing
517 * SQ_GOAWAY above (which includes SQ_EXCL).
518 * We do this here because we hold the SQLOCK, and need
519 * to make this state change BEFORE dropping it.
521 if (!(flags & SQ_CIPUT)) {
522 ASSERT((sq->sq_flags & SQ_EXCL) == 0);
523 ASSERT(!(sq->sq_type & SQ_CIPUT));
524 sq->sq_flags |= SQ_EXCL;
526 mutex_exit(SQLOCK(sq));
529 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
530 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
533 * We now have a claim on the syncq, we are either going to
534 * put the message on the syncq and then drain it, or we are
535 * going to call the putproc().
537 putproc = qi->qi_putp;
538 if (!queued) {
539 STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
540 mp->b_datap->db_base);
541 (*putproc)(qp, mp);
542 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
543 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
544 } else {
545 mutex_enter(QLOCK(qp));
547 * If there are no messages in front of us, just call putproc(),
548 * otherwise enqueue the message and drain the queue.
550 if (qp->q_syncqmsgs == 0) {
551 mutex_exit(QLOCK(qp));
552 STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
553 mp->b_datap->db_base);
554 (*putproc)(qp, mp);
555 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
556 } else {
558 * We are doing a fill with the intent to
559 * drain (meaning we are filling because
560 * there are messages in front of us ane we
561 * need to preserve message ordering)
562 * Therefore, put the message on the queue
563 * and call qdrain_syncq (must be done with
564 * the QLOCK held).
566 STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
567 mp->b_rptr - mp->b_datap->db_base);
569 #ifdef DEBUG
571 * These two values were in the original code for
572 * all syncq messages. This is unnecessary in
573 * the current implementation, but was retained
574 * in debug mode as it is usefull to know where
575 * problems occur.
577 mp->b_queue = qp;
578 mp->b_prev = (mblk_t *)putproc;
579 #endif
580 SQPUT_MP(qp, mp);
581 qdrain_syncq(sq, qp);
582 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
586 * Before we release our claim, we need to see if any
587 * events were posted. If the syncq is SQ_EXCL && SQ_QUEUED,
588 * we were responsible for going exclusive and, therefore,
589 * are resposible for draining.
591 if (sq->sq_flags & (SQ_EXCL)) {
592 drain_mask = 0;
593 } else {
594 drain_mask = SQ_QUEUED;
597 if (sqciplock != NULL) {
598 mutex_enter(sqciplock);
599 flags = sq->sq_flags;
600 ASSERT(flags & SQ_CIPUT);
601 /* SQ_EXCL could have been set by qwriter_inner */
602 if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
604 * we need SQLOCK to handle
605 * wakeups/drains/flags change. sqciplock
606 * is needed to decrement sqcipcount.
607 * SQLOCK has to be grabbed before sqciplock
608 * for lock ordering purposes.
609 * after sqcipcount is decremented some lock
610 * still needs to be held to make sure
611 * syncq won't get freed on us.
613 * To prevent deadlocks we try to grab SQLOCK and if it
614 * is held already we drop sqciplock, acquire SQLOCK and
615 * reacqwire sqciplock again.
617 if (mutex_tryenter(SQLOCK(sq)) == 0) {
618 mutex_exit(sqciplock);
619 mutex_enter(SQLOCK(sq));
620 mutex_enter(sqciplock);
622 flags = sq->sq_flags;
623 ASSERT(*sqcipcount != 0);
624 (*sqcipcount)--;
625 mutex_exit(sqciplock);
626 } else {
627 ASSERT(*sqcipcount != 0);
628 (*sqcipcount)--;
629 mutex_exit(sqciplock);
630 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
631 "putnext_end:(%p, %p, %p) done", qp, mp, sq);
632 return;
634 } else {
635 mutex_enter(SQLOCK(sq));
636 flags = sq->sq_flags;
637 ASSERT(sq->sq_count != 0);
638 sq->sq_count--;
640 if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
641 putnext_tail(sq, qp, (flags & ~drain_mask));
643 * The only purpose of this ASSERT is to preserve calling stack
644 * in DEBUG kernel.
646 ASSERT(sq != NULL);
647 return;
649 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
650 ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
652 * Safe to always drop SQ_EXCL:
653 * Not SQ_CIPUT means we set SQ_EXCL above
654 * For SQ_CIPUT SQ_EXCL will only be set if the put
655 * procedure did a qwriter(INNER) in which case
656 * nobody else is in the inner perimeter and we
657 * are exiting.
659 * I would like to make the following assertion:
661 * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
662 * sq->sq_count == 0);
664 * which indicates that if we are both putshared and exclusive,
665 * we became exclusive while executing the putproc, and the only
666 * claim on the syncq was the one we dropped a few lines above.
667 * But other threads that enter putnext while the syncq is exclusive
668 * need to make a claim as they may need to drop SQLOCK in the
669 * has_writers case to avoid deadlocks. If these threads are
670 * delayed or preempted, it is possible that the writer thread can
671 * find out that there are other claims making the (sq_count == 0)
672 * test invalid.
675 sq->sq_flags = flags & ~SQ_EXCL;
676 mutex_exit(SQLOCK(sq));
677 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
678 "putnext_end:(%p, %p, %p) done", qp, mp, sq);