/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2017 by Delphix. All rights reserved.
 */
/*
 * Kernel task queues: general-purpose asynchronous task scheduling.
 *
 * A common problem in kernel programming is the need to schedule tasks
 * to be performed later, by another thread. There are several reasons
 * you may want or need to do this:
 *
 * (1) The task isn't time-critical, but your current code path is.
 *
 * (2) The task may require grabbing locks that you already hold.
 *
 * (3) The task may need to block (e.g. to wait for memory), but you
 *     cannot block in your current context.
 *
 * (4) Your code path can't complete because of some condition, but you can't
 *     sleep or fail, so you queue the task for later execution when the
 *     condition disappears.
 *
 * (5) You just want a simple way to launch multiple tasks in parallel.
 *
 * Task queues provide such a facility. In its simplest form (used when
 * performance is not a critical consideration) a task queue consists of a
 * single list of tasks, together with one or more threads to service the
 * list. There are some cases when this simple queue is not sufficient:
 *
 * (1) The task queues are very hot and there is a need to avoid data and lock
 *     contention over global resources.
 *
 * (2) Some tasks may depend on other tasks to complete, so they can't be put
 *     in the same list managed by the same thread.
 *
 * (3) Some tasks may block for a long time, and this should not block other
 *     tasks in the queue.
 *
 * To provide useful service in such cases we define a "dynamic task queue",
 * which has an individual thread for each of the tasks. These threads are
 * dynamically created as they are needed and destroyed when they are not in
 * use. The API for managing task pools is the same as for managing task queues
 * with the exception of the taskq creation flag TASKQ_DYNAMIC, which indicates
 * that dynamic task pool behavior is desired.
 *
 * Dynamic task queues may also place tasks in a normal queue (called the
 * "backing queue") when the task pool runs out of resources. Users of task
 * queues may disallow such queued scheduling by specifying TQ_NOQUEUE in the
 * dispatch flags.
 *
 * The backing task queue is also used for scheduling internal tasks needed for
 * dynamic task queue maintenance.
 *
 * INTERFACES ==================================================================
 *
 * taskq_t *taskq_create(name, nthreads, pri, minalloc, maxalloc, flags);
 *
 *	Create a taskq with specified properties.
 *	Possible 'flags':
 *
 *	  TASKQ_DYNAMIC: Create a task pool for task management. If this flag
 *		is specified, 'nthreads' specifies the maximum number of threads
 *		in the task queue. Task execution order for dynamic task queues
 *		is not predictable.
 *
 *		If this flag is not specified (default case) a
 *		single-list task queue is created with 'nthreads' threads
 *		servicing it. Entries in this queue are managed by
 *		taskq_ent_alloc() and taskq_ent_free() which try to keep the
 *		task population between 'minalloc' and 'maxalloc', but the
 *		latter limit is only advisory for TQ_SLEEP dispatches and the
 *		former limit is only advisory for TQ_NOALLOC dispatches. If
 *		TASKQ_PREPOPULATE is set in 'flags', the taskq will be
 *		prepopulated with 'minalloc' task structures.
 *
 *		Since non-DYNAMIC taskqs are queues, tasks are guaranteed to be
 *		executed in the order they are scheduled if nthreads == 1.
 *		If nthreads > 1, task execution order is not predictable.
 *
 *	  TASKQ_PREPOPULATE: Prepopulate task queue with threads.
 *		Also prepopulate the task queue with 'minalloc' task structures.
 *
 *	  TASKQ_THREADS_CPU_PCT: This flag specifies that 'nthreads' should be
 *		interpreted as a percentage of the # of online CPUs on the
 *		system. The taskq subsystem will automatically adjust the
 *		number of threads in the taskq in response to CPU online
 *		and offline events, to keep the ratio. nthreads must be in
 *		the range [0,100].
 *
 *		The calculation used is:
 *
 *			MAX((ncpus_online * percentage)/100, 1)
 *
 *		This flag is not supported for DYNAMIC task queues.
 *		This flag is not compatible with TASKQ_CPR_SAFE.
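 *
 *		For example, on a system with 8 CPUs online, a taskq
 *		created with 'nthreads' = 50 is sized to
 *		MAX((8 * 50)/100, 1) = 4 threads, and one created with
 *		'nthreads' = 5 is sized to MAX((8 * 5)/100, 1) = 1.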
 *
 *	  TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
 *		use their own protocol for handling CPR issues. This flag is not
 *		supported for DYNAMIC task queues. This flag is not compatible
 *		with TASKQ_THREADS_CPU_PCT.
 *
 *	The 'pri' field specifies the default priority for the threads that
 *	service all scheduled tasks.
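 *
 *	An illustrative caller-side sketch (not taken from this code; the
 *	task function 'mytask' and its argument are placeholders):
 *
 *		taskq_t *tq = taskq_create("mytaskq", 1, minclsyspri,
 *		    4, 8, TASKQ_PREPOPULATE);
 *		(void) taskq_dispatch(tq, mytask, arg, TQ_SLEEP);
 *		...
 *		taskq_wait(tq);
 *		taskq_destroy(tq);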
 *
 * taskq_t *taskq_create_instance(name, instance, nthreads, pri, minalloc,
 *    maxalloc, flags);
 *
 *	Like taskq_create(), but takes an instance number (or -1 to indicate
 *	no instance).
 *
 * taskq_t *taskq_create_proc(name, nthreads, pri, minalloc, maxalloc, proc,
 *    flags);
 *
 *	Like taskq_create(), but creates the taskq threads in the specified
 *	system process. If proc != &p0, this must be called from a thread
 *	in that process.
 *
 * taskq_t *taskq_create_sysdc(name, nthreads, minalloc, maxalloc, proc,
 *    dc, flags);
 *
 *	Like taskq_create_proc(), but the taskq threads will use the
 *	System Duty Cycle (SDC) scheduling class with a duty cycle of dc.
 *
 * void taskq_destroy(tq):
 *
 *	Waits for any scheduled tasks to complete, then destroys the taskq.
 *	Caller should guarantee that no new tasks are scheduled in the closing
 *	taskq.
 *
 * taskqid_t taskq_dispatch(tq, func, arg, flags):
 *
 *	Dispatches the task "func(arg)" to taskq. The 'flags' indicates whether
 *	the caller is willing to block for memory. The function returns an
 *	opaque value which is zero iff dispatch fails. If flags is TQ_NOSLEEP
 *	or TQ_NOALLOC and the task can't be dispatched, taskq_dispatch() fails
 *	and returns (taskqid_t)0.
 *
 *	ASSUMES: func != NULL.
 *
 *	Possible flags:
 *	  TQ_NOSLEEP: Do not wait for resources; may fail.
 *
 *	  TQ_NOALLOC: Do not allocate memory; may fail. May only be used with
 *		non-dynamic task queues.
 *
 *	  TQ_NOQUEUE: Do not enqueue a task if it can't dispatch it due to
 *		lack of available resources and fail. If this flag is not
 *		set, and the task pool is exhausted, the task may be scheduled
 *		in the backing queue. This flag may ONLY be used with dynamic
 *		task queues.
 *
 *		NOTE: This flag should always be used when a task queue is used
 *		for tasks that may depend on each other for completion.
 *		Enqueueing dependent tasks may create deadlocks.
 *
 *	  TQ_SLEEP: May block waiting for resources. May still fail for
 *		dynamic task queues if TQ_NOQUEUE is also specified, otherwise
 *		always succeeds.
 *
 *	  TQ_FRONT: Puts the new task at the front of the queue. Be careful.
 *
 *	NOTE: Dynamic task queues are much more likely to fail in
 *	taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
 *	is important to have backup strategies handling such failures.
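 *
 *	One possible backup strategy, sketched with placeholder names (the
 *	fallback policy is entirely up to the caller):
 *
 *		if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP) ==
 *		    (taskqid_t)0)
 *			func(arg);	(fall back to an inline call)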
 *
 * void taskq_dispatch_ent(tq, func, arg, flags, tqent)
 *
 *	This is a light-weight form of taskq_dispatch(), that uses a
 *	preallocated taskq_ent_t structure for scheduling. As a
 *	result, it does not perform allocations and cannot ever fail.
 *	Note especially that it cannot be used with TASKQ_DYNAMIC
 *	taskqs. The memory for the tqent must not be modified or used
 *	until the function (func) is called. (However, func itself
 *	may safely modify or free this memory, once it is called.)
 *	Note that the taskq framework will NOT free this memory.
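 *
 *	An illustrative sketch (the embedding structure and its handler are
 *	placeholders; the entry must stay untouched until 'func' runs):
 *
 *		struct myjob {
 *			taskq_ent_t	mj_tqent;	(preallocated entry)
 *			...
 *		};
 *
 *		taskq_dispatch_ent(tq, myjob_func, mj, 0, &mj->mj_tqent);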
 *
 * void taskq_wait(tq):
 *
 *	Waits for all previously scheduled tasks to complete.
 *
 *	NOTE: It does not stop any new task dispatches.
 *	      Do NOT call taskq_wait() from a task: it will cause deadlock.
 *
 * void taskq_suspend(tq)
 *
 *	Suspend all task execution. Tasks already scheduled for a dynamic task
 *	queue will still be executed, but all new scheduled tasks will be
 *	suspended until taskq_resume() is called.
 *
 * int taskq_suspended(tq)
 *
 *	Returns 1 if taskq is suspended and 0 otherwise. It is intended to
 *	ASSERT that the task queue is suspended.
 *
 * void taskq_resume(tq)
 *
 *	Resume task queue execution.
 *
 * int taskq_member(tq, thread)
 *
 *	Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
 *	intended use is to ASSERT that a given function is called in taskq
 *	context only.
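 *
 *	Typical usage is a sanity assertion in a function that must only
 *	run from a task (sketch):
 *
 *		ASSERT(taskq_member(tq, curthread));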
 *
 * system_taskq
 *
 *	Global system-wide dynamic task queue for common uses. It may be used by
 *	any subsystem that needs to schedule tasks and does not need to manage
 *	its own task queues. It is initialized quite early during system boot.
 *
 * IMPLEMENTATION ==============================================================
 *
 * This is a schematic representation of the task queue structures.
 *
 *      taskq:
 *      +-------------+
 *      | tq_lock     | +---< taskq_ent_free()
 *      +-------------+ |
 *      |...          | | tqent:                  tqent:
 *      +-------------+ | +------------+          +------------+
 *      | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
 *      +-------------+   +------------+          +------------+
 *      |...          |   | ...        |          | ...        |
 *      +-------------+   +------------+          +------------+
 *      | tq_task     |    |
 *      |             |    +-------------->taskq_ent_alloc()
 * +--------------------------------------------------------------------------+
 * | |                     |            tqent                   tqent         |
 * | +---------------------+     +--> +------------+     +--> +------------+  |
 * | | ...                 |     |    | func, arg  |     |    | func, arg  |  |
 * +>+---------------------+ <---|-+  +------------+ <---|-+  +------------+  |
 *   | tq_taskq.tqent_next | ----+ |  | tqent_next | --->+ |  | tqent_next |--+
 *   +---------------------+      |   +------------+     ^ |  +------------+
 * +-| tq_task.tqent_prev  |      +---| tqent_prev |     | +--| tqent_prev |  ^
 * | +---------------------+          +------------+     |    +------------+  |
 * | |...                  |          | ...        |     |    | ...        |  |
 * | +---------------------+          +------------+     |    +------------+  |
 * +--------------------------------------+--------------+    TQ_APPEND() ----+
 *   |             |                      |
 *   |...          |   taskq_thread()-----+
 *   +-------------+
 *   | tq_buckets  |--+-------> [ NULL ] (for regular task queues)
 *   +-------------+  |
 *                    |   DYNAMIC TASK QUEUES:
 *                    |
 *                    +-> taskq_bucket[nCPU]       taskq_bucket_dispatch()
 *                        +-------------------+                    ^
 *                   +--->| tqbucket_lock     |                    |
 *                   |    +-------------------+   +--------+      +--------+
 *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  | ^
 *                   |    +-------------------+<--+--------+<--...+--------+ |
 *                   |    | ...               |   | thread |      | thread | |
 *                   |    +-------------------+   +--------+      +--------+ |
 *                   |    +-------------------+                              |
 * taskq_dispatch()--+--->| tqbucket_lock     |              TQ_APPEND()-----+
 *      TQ_HASH()    |    +-------------------+   +--------+      +--------+
 *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  |
 *                   |    +-------------------+<--+--------+<--...+--------+
 *                   |    | ...               |   | thread |      | thread |
 *                   |    +-------------------+   +--------+      +--------+
 *                   +--->    ...
 *
 * Task queues use the tq_task field to link new entries into the queue. The
 * queue is a circular doubly-linked list. Entries are put at the end of the
 * list with TQ_APPEND() and processed from the front of the list by
 * taskq_thread() in FIFO order. Task queue entries are cached in the free list
 * managed by the taskq_ent_alloc() and taskq_ent_free() functions.
 *
 *	All threads used by task queues mark the t_taskq field of the thread
 *	to point to the task queue.
 *
 * Taskq Thread Management -----------------------------------------------------
 *
 * Taskq's non-dynamic threads are managed with several variables and flags:
 *
 *	* tq_nthreads	- The number of threads in taskq_thread() for the
 *			  taskq.
 *
 *	* tq_active	- The number of threads not waiting on a CV in
 *			  taskq_thread(); includes newly created threads
 *			  not yet counted in tq_nthreads.
 *
 *	* tq_nthreads_target
 *			- The number of threads desired for the taskq.
 *
 *	* tq_flags & TASKQ_CHANGING
 *			- Indicates that tq_nthreads != tq_nthreads_target.
 *
 *	* tq_flags & TASKQ_THREAD_CREATED
 *			- Indicates that a thread is being created in the
 *			  taskq.
 *
 * During creation, tq_nthreads and tq_active are set to 0, and
 * tq_nthreads_target is set to the number of threads desired. The
 * TASKQ_CHANGING flag is set, and taskq_thread_create() is called to
 * create the first thread. taskq_thread_create() increments tq_active,
 * sets TASKQ_THREAD_CREATED, and creates the new thread.
 *
 * Each thread starts in taskq_thread(), clears the TASKQ_THREAD_CREATED
 * flag, and increments tq_nthreads. It stores the new value of
 * tq_nthreads as its "thread_id", and stores its thread pointer in the
 * tq_threadlist at index (thread_id - 1). We keep the thread_id space
 * densely packed by requiring that only the largest thread_id can exit during
 * normal adjustment. The exception is during the destruction of the
 * taskq; once tq_nthreads_target is set to zero, no new threads will be
 * created for the taskq, so every thread can exit without any ordering being
 * necessary.
 *
 * Threads will only process work if their thread id is <= tq_nthreads_target.
 *
 * When TASKQ_CHANGING is set, threads will check the current thread target
 * whenever they wake up, and do whatever they can to apply its effects.
 *
 * TASKQ_THREADS_CPU_PCT -------------------------------------------------------
 *
 * When a taskq is created with TASKQ_THREADS_CPU_PCT, we store the requested
 * percentage in tq_threads_ncpus_pct, start it off with the correct thread
 * target, and add it to the taskq_cpupct_list for later adjustment.
 *
 * We register taskq_cpu_setup() to be called whenever a CPU changes state. It
 * walks the list of TASKQ_THREADS_CPU_PCT taskqs, adjusts their
 * tq_nthreads_target if need be, and wakes up all of the threads to process
 * the change.
 *
 * Dynamic Task Queues Implementation ------------------------------------------
 *
 * For dynamic task queues there is a 1-to-1 mapping between a thread and a
 * taskq_ent_t structure. Each entry is serviced by its own thread and each
 * thread is controlled by a single entry.
 *
 * Entries are distributed over a set of buckets. To avoid using modulo
 * arithmetic the number of buckets is 2^n and is determined as the nearest
 * power-of-two round-down of the number of CPUs in the system. The tunable
 * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each entry
 * is attached to a bucket for its lifetime and can't migrate to other buckets.
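 *
 * For example, on a 6-CPU system the nearest power-of-two round-down is 4,
 * so 4 buckets are created; a 16-CPU system gets 16 buckets, subject to the
 * 'taskq_maxbuckets' cap (128 by default).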
 *
 * Entries that have scheduled tasks are not placed in any list. The dispatch
 * function sets their "func" and "arg" fields and signals the corresponding
 * thread to execute the task. Once the thread executes the task it clears the
 * "func" field and places the entry on the bucket cache of free entries
 * pointed to by the "tqbucket_freelist" field. ALL entries on the free list
 * should have the "func" field equal to NULL. The free list is a circular
 * doubly-linked list identical in structure to the tq_task list above, but
 * entries are taken from it in LIFO order - the last freed entry is the first
 * to be allocated. The taskq_bucket_dispatch() function gets the most recently
 * used entry from the free list, sets its "func" and "arg" fields and signals
 * a worker thread.
 *
 * After executing each task a per-entry thread taskq_d_thread() places its
 * entry on the bucket free list and goes to a timed sleep. If it wakes up
 * without getting a new task it removes the entry from the free list and
 * destroys itself. The thread sleep time is controlled by the tunable variable
 * 'taskq_thread_timeout'.
 *
 * There are various statistics kept in the bucket which allow for later
 * analysis of taskq usage patterns. Also, a global copy of taskq creation and
 * death statistics is kept in the global taskq data structure. Since thread
 * creation and death happen rarely, updating such global data does not present
 * a performance problem.
 *
 * NOTE: Threads are not bound to any CPU and there is absolutely no association
 *       between the bucket and actual thread CPU, so buckets are used only to
 *       split resources and reduce resource contention. Having threads attached
 *       to the CPU denoted by a bucket may reduce the number of times the job
 *       switches between CPUs.
 *
 *       The current algorithm creates a thread whenever a bucket has no free
 *       entries. It would be nice to know how many threads are in the running
 *       state and not create threads if all CPUs are busy with existing
 *       tasks, but it is unclear how such a strategy can be implemented.
 *
 *       Currently buckets are created statically as an array attached to the
 *       task queue. On systems with nCPUs < max_ncpus this may waste system
 *       memory. One solution may be allocation of buckets when they are first
 *       touched, but it is not clear how useful that is.
 *
 * SUSPEND/RESUME implementation -----------------------------------------------
 *
 *	Before executing a task, taskq_thread() (which executes non-dynamic
 *	task queues) obtains the taskq's thread lock as a reader. The
 *	taskq_suspend() function takes the same lock as a writer, blocking all
 *	non-dynamic task execution. The taskq_resume() function releases the
 *	lock, allowing taskq_thread() to continue execution.
 *
 *	For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
 *	the taskq_suspend() function. After that taskq_bucket_dispatch() always
 *	fails, so that taskq_dispatch() will either enqueue tasks for a
 *	suspended backing queue or fail if TQ_NOQUEUE is specified in dispatch
 *	flags.
 *
 *	NOTE: taskq_suspend() does not immediately block any tasks already
 *	      scheduled for dynamic task queues. It only suspends new tasks
 *	      scheduled after taskq_suspend() was called.
 *
 *	The taskq_member() function works by comparing a thread's t_taskq
 *	pointer with the passed taskq pointer.
 *
 * LOCKS and LOCK Hierarchy ----------------------------------------------------
 *
 *   There are three locks used in task queues:
 *
 *   1) The taskq_t's tq_lock, protecting global task queue state.
 *
 *   2) Each per-CPU bucket has a lock for bucket management.
 *
 *   3) The global taskq_cpupct_lock, which protects the list of
 *      TASKQ_THREADS_CPU_PCT taskqs.
 *
 *   If both (1) and (2) are needed, tq_lock should be taken *after* the bucket
 *   lock.
 *
 *   If both (1) and (3) are needed, tq_lock should be taken *after*
 *   taskq_cpupct_lock.
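 *
 *   A sketch of the resulting acquisition order for code that needs both
 *   (1) and (2):
 *
 *	mutex_enter(&b->tqbucket_lock);
 *	mutex_enter(&tq->tq_lock);
 *	...
 *	mutex_exit(&tq->tq_lock);
 *	mutex_exit(&b->tqbucket_lock);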
 *
 * DEBUG FACILITIES ------------------------------------------------------------
 *
 * For DEBUG kernels it is possible to induce random failures in the
 * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The
 * taskq_dmtbf and taskq_smtbf tunables control the mean time between induced
 * failures for dynamic and static task queues respectively.
 *
 * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
 *
 * TUNABLES --------------------------------------------------------------------
 *
 *	system_taskq_size	- Size of the global system_taskq.
 *				  This value is multiplied by nCPUs to determine
 *				  actual size.
 *				  Default value: 64
 *
 *	taskq_minimum_nthreads_max
 *				- Minimum size of the thread list for a taskq.
 *				  Useful for testing different thread pool
 *				  sizes by overwriting tq_nthreads_target.
 *
 *	taskq_thread_timeout	- Maximum idle time for taskq_d_thread().
 *				  Default value: 5 minutes
 *
 *	taskq_maxbuckets	- Maximum number of buckets in any task queue.
 *				  Default value: 128
 *
 *	taskq_search_depth	- Maximum # of buckets searched for a free
 *				  entry.
 *				  Default value: 4
 *
 *	taskq_dmtbf		- Mean time between induced dispatch failures
 *				  for dynamic task queues.
 *				  Default value: UINT_MAX (no induced failures)
 *
 *	taskq_smtbf		- Mean time between induced dispatch failures
 *				  for static task queues.
 *				  Default value: UINT_MAX (no induced failures)
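 *
 *	These are ordinary kernel globals, so they can typically be set from
 *	/etc/system (illustrative):
 *
 *		set taskq_thread_timeout = 600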
 *
 * CONDITIONAL compilation -----------------------------------------------------
 *
 *    TASKQ_STATISTIC	- If set will enable bucket statistic (default).
 *
 */
#include <sys/taskq_impl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/callb.h>
#include <sys/class.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>	/* For throttlefree */
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/sdt.h>
#include <sys/sysdc.h>
#include <sys/note.h>
static kmem_cache_t *taskq_ent_cache, *taskq_cache;
/*
 * Pseudo instance numbers for taskqs without explicitly provided instance.
 */
static vmem_t *taskq_id_arena;
/* Global system task queue for common use */
taskq_t	*system_taskq;
/*
 * Maximum number of entries in global system taskq is
 *	system_taskq_size * max_ncpus
 */
#define	SYSTEM_TASKQ_SIZE 64
int system_taskq_size = SYSTEM_TASKQ_SIZE;
/*
 * Minimum size for tq_nthreads_max; useful for those who want to play around
 * with increasing a taskq's tq_nthreads_target.
 */
int taskq_minimum_nthreads_max = 1;
/*
 * We want to ensure that when taskq_create() returns, there is at least
 * one thread ready to handle requests. To guarantee this, we have to wait
 * for the second thread, since the first one cannot process requests until
 * the second thread has been created.
 */
#define	TASKQ_CREATE_ACTIVE_THREADS	2
/* Maximum percentage allowed for TASKQ_THREADS_CPU_PCT */
#define	TASKQ_CPUPCT_MAX_PERCENT	1000
int taskq_cpupct_max_percent = TASKQ_CPUPCT_MAX_PERCENT;
/*
 * Dynamic task queue threads that don't get any work within
 * taskq_thread_timeout destroy themselves
 */
#define	TASKQ_THREAD_TIMEOUT	(60 * 5)
int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;
#define	TASKQ_MAXBUCKETS	128
int taskq_maxbuckets = TASKQ_MAXBUCKETS;
/*
 * When a bucket has no available entries, other buckets are tried.
 * The taskq_search_depth parameter limits the number of buckets that we
 * search before failing. This is mostly useful in systems with many CPUs
 * where we may spend too much time scanning busy buckets.
 */
#define	TASKQ_SEARCH_DEPTH	4
int taskq_search_depth = TASKQ_SEARCH_DEPTH;
/*
 * Hashing function: mix various bits of x. May be pretty much anything.
 */
#define	TQ_HASH(x) ((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))
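/*
 * In the dispatch path the hash input is derived from the current CPU and
 * the task argument, and the result is masked with (tq_nbuckets - 1) to
 * select a bucket; see taskq_dispatch() below.
 */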
/*
 * We do not create any new threads when the system is low on memory and start
 * throttling memory allocations. The following macro tries to estimate such
 * a condition.
 */
#define	ENOUGH_MEMORY() (freemem > throttlefree)
/*
 * Static functions.
 */
static taskq_t	*taskq_create_common(const char *, int, int, pri_t, int,
    int, proc_t *, uint_t, uint_t);
static void taskq_thread(void *);
static void taskq_d_thread(taskq_ent_t *);
static void taskq_bucket_extend(void *);
static int  taskq_constructor(void *, void *, int);
static void taskq_destructor(void *, void *);
static int  taskq_ent_constructor(void *, void *, int);
static void taskq_ent_destructor(void *, void *);
static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
static void taskq_ent_free(taskq_t *, taskq_ent_t *);
static int taskq_ent_exists(taskq_t *, task_func_t, void *);
static taskq_ent_t *taskq_bucket_dispatch(taskq_bucket_t *, task_func_t,
    void *);
/*
 * Task queues kstats.
 */
struct taskq_kstat {
	kstat_named_t	tq_pid;
	kstat_named_t	tq_tasks;
	kstat_named_t	tq_executed;
	kstat_named_t	tq_maxtasks;
	kstat_named_t	tq_totaltime;
	kstat_named_t	tq_nalloc;
	kstat_named_t	tq_nactive;
	kstat_named_t	tq_pri;
	kstat_named_t	tq_nthreads;
	kstat_named_t	tq_nomem;
} taskq_kstat = {
	{ "pid",		KSTAT_DATA_UINT64 },
	{ "tasks",		KSTAT_DATA_UINT64 },
	{ "executed",		KSTAT_DATA_UINT64 },
	{ "maxtasks",		KSTAT_DATA_UINT64 },
	{ "totaltime",		KSTAT_DATA_UINT64 },
	{ "nalloc",		KSTAT_DATA_UINT64 },
	{ "nactive",		KSTAT_DATA_UINT64 },
	{ "priority",		KSTAT_DATA_UINT64 },
	{ "threads",		KSTAT_DATA_UINT64 },
	{ "nomem",		KSTAT_DATA_UINT64 },
};
struct taskq_d_kstat {
	kstat_named_t	tqd_pri;
	kstat_named_t	tqd_btasks;
	kstat_named_t	tqd_bexecuted;
	kstat_named_t	tqd_bmaxtasks;
	kstat_named_t	tqd_bnalloc;
	kstat_named_t	tqd_bnactive;
	kstat_named_t	tqd_btotaltime;
	kstat_named_t	tqd_hits;
	kstat_named_t	tqd_misses;
	kstat_named_t	tqd_overflows;
	kstat_named_t	tqd_tcreates;
	kstat_named_t	tqd_tdeaths;
	kstat_named_t	tqd_maxthreads;
	kstat_named_t	tqd_nomem;
	kstat_named_t	tqd_disptcreates;
	kstat_named_t	tqd_totaltime;
	kstat_named_t	tqd_nalloc;
	kstat_named_t	tqd_nfree;
} taskq_d_kstat = {
	{ "priority",		KSTAT_DATA_UINT64 },
	{ "btasks",		KSTAT_DATA_UINT64 },
	{ "bexecuted",		KSTAT_DATA_UINT64 },
	{ "bmaxtasks",		KSTAT_DATA_UINT64 },
	{ "bnalloc",		KSTAT_DATA_UINT64 },
	{ "bnactive",		KSTAT_DATA_UINT64 },
	{ "btotaltime",		KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 },
	{ "overflows",		KSTAT_DATA_UINT64 },
	{ "tcreates",		KSTAT_DATA_UINT64 },
	{ "tdeaths",		KSTAT_DATA_UINT64 },
	{ "maxthreads",		KSTAT_DATA_UINT64 },
	{ "nomem",		KSTAT_DATA_UINT64 },
	{ "disptcreates",	KSTAT_DATA_UINT64 },
	{ "totaltime",		KSTAT_DATA_UINT64 },
	{ "nalloc",		KSTAT_DATA_UINT64 },
	{ "nfree",		KSTAT_DATA_UINT64 },
};
static kmutex_t taskq_kstat_lock;
static kmutex_t taskq_d_kstat_lock;
static int taskq_kstat_update(kstat_t *, int);
static int taskq_d_kstat_update(kstat_t *, int);
/*
 * List of all TASKQ_THREADS_CPU_PCT taskqs.
 */
static list_t taskq_cpupct_list;	/* protected by cpu_lock */
/*
 * Collect per-bucket statistic when TASKQ_STATISTIC is defined.
 */
#define	TASKQ_STATISTIC 1

#if TASKQ_STATISTIC
#define	TQ_STAT(b, x)	b->tqbucket_stat.x++
#else
#define	TQ_STAT(b, x)
#endif
/*
 * Random fault injection.
 */
uint_t taskq_random;
uint_t taskq_dmtbf = UINT_MAX;	/* mean time between injected failures */
uint_t taskq_smtbf = UINT_MAX;	/* mean time between injected failures */
/*
 * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
 *
 * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
 * they could prepopulate the cache and make sure that they do not use more
 * than minalloc entries. So, fault injection in this case ensures that
 * either TASKQ_PREPOPULATE is not set or there are more entries allocated
 * than is specified by minalloc. TQ_NOALLOC dispatches are always allowed
 * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
 * dispatches.
 */
#ifdef DEBUG
#define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & TQ_NOSLEEP) &&				\
	    taskq_random < 1771875 / taskq_dmtbf) {		\
		return ((uintptr_t)NULL);			\
	}

#define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) &&		\
	    (!(tq->tq_flags & TASKQ_PREPOPULATE) ||		\
	    (tq->tq_nalloc > tq->tq_minalloc)) &&		\
	    (taskq_random < (1771875 / taskq_smtbf))) {		\
		mutex_exit(&tq->tq_lock);			\
		return ((uintptr_t)NULL);			\
	}
#else
#define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
#define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
#endif	/* DEBUG */
#define	IS_EMPTY(l) (((l).tqent_prev == (l).tqent_next) &&	\
	((l).tqent_prev == &(l)))
/*
 * Append `tqe' at the end of the doubly-linked list denoted by l.
 */
#define	TQ_APPEND(l, tqe) {					\
	tqe->tqent_next = &l;					\
	tqe->tqent_prev = l.tqent_prev;				\
	tqe->tqent_next->tqent_prev = tqe;			\
	tqe->tqent_prev->tqent_next = tqe;			\
}
/*
 * Prepend 'tqe' to the beginning of l.
 */
#define	TQ_PREPEND(l, tqe) {					\
	tqe->tqent_next = l.tqent_next;				\
	tqe->tqent_prev = &l;					\
	tqe->tqent_next->tqent_prev = tqe;			\
	tqe->tqent_prev->tqent_next = tqe;			\
}
/*
 * Schedule a task specified by func and arg into the task queue entry tqe.
 */
#define	TQ_DO_ENQUEUE(tq, tqe, func, arg, front) {			\
	ASSERT(MUTEX_HELD(&tq->tq_lock));				\
	_NOTE(CONSTCOND)						\
	if (front) {							\
		TQ_PREPEND(tq->tq_task, tqe);				\
	} else {							\
		TQ_APPEND(tq->tq_task, tqe);				\
	}								\
	tqe->tqent_func = (func);					\
	tqe->tqent_arg = (arg);						\
	tq->tq_tasks++;							\
	if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks)		\
		tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed;	\
	cv_signal(&tq->tq_dispatch_cv);					\
	DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
}
#define	TQ_ENQUEUE(tq, tqe, func, arg)					\
	TQ_DO_ENQUEUE(tq, tqe, func, arg, 0)

#define	TQ_ENQUEUE_FRONT(tq, tqe, func, arg)				\
	TQ_DO_ENQUEUE(tq, tqe, func, arg, 1)
/*
 * Do-nothing task which may be used to prepopulate thread caches.
 */
/*ARGSUSED*/
void
nulltask(void *unused)
{
}
/*ARGSUSED*/
static int
taskq_constructor(void *buf, void *cdrarg, int kmflags)
{
	taskq_t *tq = buf;

	bzero(tq, sizeof (taskq_t));

	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);

	tq->tq_task.tqent_next = &tq->tq_task;
	tq->tq_task.tqent_prev = &tq->tq_task;

	return (0);
}
/*ARGSUSED*/
static void
taskq_destructor(void *buf, void *cdrarg)
{
	taskq_t *tq = buf;

	ASSERT(tq->tq_nthreads == 0);
	ASSERT(tq->tq_buckets == NULL);
	ASSERT(tq->tq_tcreates == 0);
	ASSERT(tq->tq_tdeaths == 0);

	mutex_destroy(&tq->tq_lock);
	rw_destroy(&tq->tq_threadlock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_exit_cv);
	cv_destroy(&tq->tq_wait_cv);
	cv_destroy(&tq->tq_maxalloc_cv);
}
/*ARGSUSED*/
static int
taskq_ent_constructor(void *buf, void *cdrarg, int kmflags)
{
	taskq_ent_t *tqe = buf;

	tqe->tqent_thread = NULL;
	cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}
/*ARGSUSED*/
static void
taskq_ent_destructor(void *buf, void *cdrarg)
{
	taskq_ent_t *tqe = buf;

	ASSERT(tqe->tqent_thread == NULL);
	cv_destroy(&tqe->tqent_cv);
}
void
taskq_init(void)
{
	taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
	    sizeof (taskq_ent_t), 0, taskq_ent_constructor,
	    taskq_ent_destructor, NULL, NULL, NULL, 0);
	taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
	    0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
	taskq_id_arena = vmem_create("taskq_id_arena",
	    (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0,
	    VM_SLEEP | VMC_IDENTIFIER);

	list_create(&taskq_cpupct_list, sizeof (taskq_t),
	    offsetof(taskq_t, tq_cpupct_link));
}
static void
taskq_update_nthreads(taskq_t *tq, uint_t ncpus)
{
	uint_t newtarget = TASKQ_THREADS_PCT(ncpus, tq->tq_threads_ncpus_pct);

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/* We must be going from non-zero to non-zero; no exiting. */
	ASSERT3U(tq->tq_nthreads_target, !=, 0);
	ASSERT3U(newtarget, !=, 0);

	ASSERT3U(newtarget, <=, tq->tq_nthreads_max);
	if (newtarget != tq->tq_nthreads_target) {
		tq->tq_flags |= TASKQ_CHANGING;
		tq->tq_nthreads_target = newtarget;
		cv_broadcast(&tq->tq_dispatch_cv);
		cv_broadcast(&tq->tq_exit_cv);
	}
}
/* called during task queue creation */
static void
taskq_cpupct_install(taskq_t *tq, cpupart_t *cpup)
{
	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);

	mutex_enter(&cpu_lock);
	mutex_enter(&tq->tq_lock);
	tq->tq_cpupart = cpup->cp_id;
	taskq_update_nthreads(tq, cpup->cp_ncpus);
	mutex_exit(&tq->tq_lock);

	list_insert_tail(&taskq_cpupct_list, tq);
	mutex_exit(&cpu_lock);
}
static void
taskq_cpupct_remove(taskq_t *tq)
{
	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);

	mutex_enter(&cpu_lock);
	list_remove(&taskq_cpupct_list, tq);
	mutex_exit(&cpu_lock);
}
/*ARGSUSED*/
static int
taskq_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	taskq_t *tq;
	cpupart_t *cp = cpu[id]->cpu_part;
	uint_t ncpus = cp->cp_ncpus;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(ncpus > 0);

	switch (what) {
	case CPU_OFF:
	case CPU_CPUPART_OUT:
		/* offlines are called *before* the cpu is offlined. */
		if (ncpus > 1)
			ncpus--;
		break;

	case CPU_ON:
	case CPU_CPUPART_IN:
		break;

	default:
		return (0);		/* doesn't affect cpu count */
	}

	for (tq = list_head(&taskq_cpupct_list); tq != NULL;
	    tq = list_next(&taskq_cpupct_list, tq)) {

		mutex_enter(&tq->tq_lock);
		/*
		 * If the taskq is part of the cpuset which is changing,
		 * update its nthreads_target.
		 */
		if (tq->tq_cpupart == cp->cp_id) {
			taskq_update_nthreads(tq, ncpus);
		}
		mutex_exit(&tq->tq_lock);
	}
	return (0);
}
void
taskq_mp_init(void)
{
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(taskq_cpu_setup, NULL);
	/*
	 * Make sure we're up to date. At this point in boot, there is only
	 * one processor set, so we only have to update the current CPU.
	 */
	(void) taskq_cpu_setup(CPU_ON, CPU->cpu_id, NULL);
	mutex_exit(&cpu_lock);
}
/*
 * Create global system dynamic task queue.
 */
void
system_taskq_init(void)
{
	system_taskq = taskq_create_common("system_taskq", 0,
	    system_taskq_size * max_ncpus, minclsyspri, 4, 512, &p0, 0,
	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}
/*
 * taskq_ent_alloc()
 *
 * Allocates a new taskq_ent_t structure either from the free list or from the
 * cache. Returns NULL if it can't be allocated.
 *
 * Assumes: tq->tq_lock is held.
 */
static taskq_ent_t *
taskq_ent_alloc(taskq_t *tq, int flags)
{
	int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	taskq_ent_t *tqe;
	clock_t wait_time;
	clock_t	wait_rv;

	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/*
	 * TQ_NOALLOC allocations are allowed to use the freelist, even if
	 * we are below tq_minalloc.
	 */
again:	if ((tqe = tq->tq_freelist) != NULL &&
	    ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
		tq->tq_freelist = tqe->tqent_next;
	} else {
		if (flags & TQ_NOALLOC)
			return (NULL);

		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (kmflags & KM_NOSLEEP)
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller. So, we just delay for one second
			 * to throttle the allocation rate. If we have tasks
			 * complete before the one second timeout expires then
			 * taskq_ent_free will signal us and we will
			 * immediately retry the allocation (reap free).
			 */
			wait_time = ddi_get_lbolt() + hz;
			while (tq->tq_freelist == NULL) {
				tq->tq_maxalloc_wait++;
				wait_rv = cv_timedwait(&tq->tq_maxalloc_cv,
				    &tq->tq_lock, wait_time);
				tq->tq_maxalloc_wait--;
				if (wait_rv == -1)
					break;
			}
			if (tq->tq_freelist)
				goto again;	/* reap freelist */

		}
		mutex_exit(&tq->tq_lock);

		tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);

		mutex_enter(&tq->tq_lock);
		if (tqe != NULL)
			tq->tq_nalloc++;
	}
	return (tqe);
}
/*
 * taskq_ent_free()
 *
 * Free taskq_ent_t structure by either putting it on the free list or freeing
 * it to the cache.
 *
 * Assumes: tq->tq_lock is held.
 */
static void
taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
{
	ASSERT(MUTEX_HELD(&tq->tq_lock));

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		tqe->tqent_next = tq->tq_freelist;
		tq->tq_freelist = tqe;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_cache_free(taskq_ent_cache, tqe);
		mutex_enter(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}
/*
 * taskq_ent_exists()
 *
 * Return 1 if taskq already has an entry for calling 'func(arg)'.
 *
 * Assumes: tq->tq_lock is held.
 */
static int
taskq_ent_exists(taskq_t *tq, task_func_t func, void *arg)
{
	taskq_ent_t	*tqe;

	ASSERT(MUTEX_HELD(&tq->tq_lock));

	for (tqe = tq->tq_task.tqent_next; tqe != &tq->tq_task;
	    tqe = tqe->tqent_next)
		if ((tqe->tqent_func == func) && (tqe->tqent_arg == arg))
			return (1);
	return (0);
}
/*
 * Dispatch a task "func(arg)" to a free entry of bucket b.
 *
 * Assumes: no bucket locks are held.
 *
 * Returns: a pointer to an entry if dispatch was successful.
 *	    NULL if there are no free entries or if the bucket is suspended.
 */
static taskq_ent_t *
taskq_bucket_dispatch(taskq_bucket_t *b, task_func_t func, void *arg)
{
	taskq_ent_t *tqe;

	ASSERT(MUTEX_NOT_HELD(&b->tqbucket_lock));
	ASSERT(func != NULL);

	mutex_enter(&b->tqbucket_lock);

	ASSERT(b->tqbucket_nfree != 0 || IS_EMPTY(b->tqbucket_freelist));
	ASSERT(b->tqbucket_nfree == 0 || !IS_EMPTY(b->tqbucket_freelist));

	/*
	 * Get an entry from the freelist if there is one.
	 * Schedule task into the entry.
	 */
	if ((b->tqbucket_nfree != 0) &&
	    !(b->tqbucket_flags & TQBUCKET_SUSPEND)) {
		tqe = b->tqbucket_freelist.tqent_prev;

		ASSERT(tqe != &b->tqbucket_freelist);
		ASSERT(tqe->tqent_thread != NULL);

		tqe->tqent_prev->tqent_next = tqe->tqent_next;
		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
		b->tqbucket_nalloc++;
		b->tqbucket_nfree--;
		tqe->tqent_func = func;
		tqe->tqent_arg = arg;
		TQ_STAT(b, tqs_hits);
		cv_signal(&tqe->tqent_cv);
		DTRACE_PROBE2(taskq__d__enqueue, taskq_bucket_t *, b,
		    taskq_ent_t *, tqe);
	} else {
		tqe = NULL;
		TQ_STAT(b, tqs_misses);
	}
	mutex_exit(&b->tqbucket_lock);
	return (tqe);
}
/*
 * Dispatch a task.
 *
 * Assumes: func != NULL
 *
 * Returns: NULL if dispatch failed.
 *	    non-NULL if task dispatched successfully.
 *	    Actual return value is the pointer to taskq entry that was used to
 *	    dispatch a task. This is useful for debugging.
 */
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_bucket_t *bucket = NULL;	/* Which bucket needs extension */
	taskq_ent_t *tqe = NULL;
	taskq_ent_t *tqe1;
	uint_t bsize;

	ASSERT(tq != NULL);
	ASSERT(func != NULL);

	if (!(tq->tq_flags & TASKQ_DYNAMIC)) {
		/*
		 * TQ_NOQUEUE flag can't be used with non-dynamic task queues.
		 */
		ASSERT(!(flags & TQ_NOQUEUE));
		/*
		 * Enqueue the task to the underlying queue.
		 */
		mutex_enter(&tq->tq_lock);

		TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);

		if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
			tq->tq_nomem++;
			mutex_exit(&tq->tq_lock);
			return ((uintptr_t)NULL);
		}
		/* Make sure we start without any flags */
		tqe->tqent_un.tqent_flags = 0;

		if (flags & TQ_FRONT) {
			TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
		} else {
			TQ_ENQUEUE(tq, tqe, func, arg);
		}
		mutex_exit(&tq->tq_lock);
		return ((taskqid_t)tqe);
	}

	/*
	 * Dynamic taskq dispatching.
	 */
	ASSERT(!(flags & (TQ_NOALLOC | TQ_FRONT)));
	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flags);

	bsize = tq->tq_nbuckets;

	if (bsize == 1) {
		/*
		 * In a single-CPU case there is only one bucket, so get
		 * entry directly from there.
		 */
		if ((tqe = taskq_bucket_dispatch(tq->tq_buckets, func, arg))
		    != NULL)
			return ((taskqid_t)tqe);	/* Fastpath */
		bucket = tq->tq_buckets;
	} else {
		int loopcount;
		taskq_bucket_t *b;
		uintptr_t h = ((uintptr_t)CPU + (uintptr_t)arg) >> 3;

		h = TQ_HASH(h);

		/*
		 * The 'bucket' points to the original bucket that we hit. If we
		 * can't allocate from it, we search other buckets, but only
		 * extend this one.
		 */
		b = &tq->tq_buckets[h & (bsize - 1)];
		ASSERT(b->tqbucket_taskq == tq);	/* Sanity check */
		bucket = b;

		/*
		 * Do a quick check before grabbing the lock. If the bucket does
		 * not have free entries now, chances are very small that it
		 * will after we take the lock, so we just skip it.
		 */
		if (b->tqbucket_nfree != 0) {
			if ((tqe = taskq_bucket_dispatch(b, func, arg)) != NULL)
				return ((taskqid_t)tqe);	/* Fastpath */
		} else {
			TQ_STAT(b, tqs_misses);
		}

		loopcount = MIN(taskq_search_depth, bsize);
		/*
		 * If bucket dispatch failed, search loopcount number of buckets
		 * before we give up and fail.
		 */
		do {
			b = &tq->tq_buckets[++h & (bsize - 1)];
			ASSERT(b->tqbucket_taskq == tq);	/* Sanity check */
			loopcount--;

			if (b->tqbucket_nfree != 0) {
				tqe = taskq_bucket_dispatch(b, func, arg);
			} else {
				TQ_STAT(b, tqs_misses);
			}
		} while ((tqe == NULL) && (loopcount > 0));
	}

	/*
	 * At this point we either scheduled a task and (tqe != NULL) or failed
	 * (tqe == NULL). Try to recover from fails.
	 */

	/*
	 * For KM_SLEEP dispatches, try to extend the bucket and retry dispatch.
	 */
	if ((tqe == NULL) && !(flags & TQ_NOSLEEP)) {
		/*
		 * taskq_bucket_extend() may fail to do anything, but this is
		 * fine - we deal with it later. If the bucket was successfully
		 * extended, there is a good chance that taskq_bucket_dispatch()
		 * will get this new entry, unless someone is racing with us and
		 * stealing the new entry from under our nose.
		 * taskq_bucket_extend() may sleep.
		 */
		taskq_bucket_extend(bucket);
		TQ_STAT(bucket, tqs_disptcreates);
		if ((tqe = taskq_bucket_dispatch(bucket, func, arg)) != NULL)
			return ((taskqid_t)tqe);
	}

	ASSERT(bucket != NULL);

	/*
	 * Since there are not enough free entries in the bucket, add a
	 * taskq entry to extend it in the background using the backing queue
	 * (unless we already have a taskq entry to perform that extension).
	 */
	mutex_enter(&tq->tq_lock);
	if (!taskq_ent_exists(tq, taskq_bucket_extend, bucket)) {
		if ((tqe1 = taskq_ent_alloc(tq, TQ_NOSLEEP)) != NULL) {
			TQ_ENQUEUE_FRONT(tq, tqe1, taskq_bucket_extend, bucket);
		} else {
			tq->tq_nomem++;
		}
	}

	/*
	 * Dispatch failed and we can't find an entry to schedule a task.
	 * Revert to the backing queue unless TQ_NOQUEUE was asked.
	 */
	if ((tqe == NULL) && !(flags & TQ_NOQUEUE)) {
		if ((tqe = taskq_ent_alloc(tq, flags)) != NULL) {
			TQ_ENQUEUE(tq, tqe, func, arg);
		} else {
			tq->tq_nomem++;
		}
	}
	mutex_exit(&tq->tq_lock);

	return ((taskqid_t)tqe);
}
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *tqe)
{
	ASSERT(func != NULL);
	ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	tqe->tqent_un.tqent_flags |= TQENT_FLAG_PREALLOC;
	/*
	 * Enqueue the task to the underlying queue.
	 */
	mutex_enter(&tq->tq_lock);

	if (flags & TQ_FRONT) {
		TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
	} else {
		TQ_ENQUEUE(tq, tqe, func, arg);
	}
	mutex_exit(&tq->tq_lock);
}
/*
 * Wait for all pending tasks to complete.
 * Calling taskq_wait from a task will cause deadlock.
 */
void
taskq_wait(taskq_t *tq)
{
	ASSERT(tq != curthread->t_taskq);

	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);

	if (tq->tq_flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *b = tq->tq_buckets;
		int bid = 0;

		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
			mutex_enter(&b->tqbucket_lock);
			while (b->tqbucket_nalloc > 0)
				cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
			mutex_exit(&b->tqbucket_lock);
		}
	}
}
/*
 * Suspend execution of tasks.
 *
 * Tasks in the queue part will be suspended immediately upon return from this
 * function. Pending tasks in the dynamic part will continue to execute, but
 * all new tasks will be suspended.
 */
void
taskq_suspend(taskq_t *tq)
{
	rw_enter(&tq->tq_threadlock, RW_WRITER);

	if (tq->tq_flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *b = tq->tq_buckets;
		int bid = 0;

		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
			mutex_enter(&b->tqbucket_lock);
			b->tqbucket_flags |= TQBUCKET_SUSPEND;
			mutex_exit(&b->tqbucket_lock);
		}
	}
	/*
	 * Mark task queue as being suspended. Needed for taskq_suspended().
	 */
	mutex_enter(&tq->tq_lock);
	ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
	tq->tq_flags |= TASKQ_SUSPENDED;
	mutex_exit(&tq->tq_lock);
}
/*
 * returns: 1 if tq is suspended, 0 otherwise.
 */
int
taskq_suspended(taskq_t *tq)
{
	return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
}
/*
 * Resume taskq execution.
 */
void
taskq_resume(taskq_t *tq)
{
	ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));

	if (tq->tq_flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *b = tq->tq_buckets;
		int bid = 0;

		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
			mutex_enter(&b->tqbucket_lock);
			b->tqbucket_flags &= ~TQBUCKET_SUSPEND;
			mutex_exit(&b->tqbucket_lock);
		}
	}
	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
	tq->tq_flags &= ~TASKQ_SUSPENDED;
	mutex_exit(&tq->tq_lock);

	rw_exit(&tq->tq_threadlock);
}
int
taskq_member(taskq_t *tq, kthread_t *thread)
{
	return (thread->t_taskq == tq);
}
/*
 * Creates a thread in the taskq. We only allow one outstanding create at
 * a time. We drop and reacquire the tq_lock in order to avoid blocking other
 * taskq activity while thread_create() or lwp_kernel_create() run.
 *
 * The first time we're called, we do some additional setup, and do not
 * return until there are enough threads to start servicing requests.
 */
static void
taskq_thread_create(taskq_t *tq)
{
	kthread_t	*t;
	const boolean_t	first = (tq->tq_nthreads == 0);

	ASSERT(MUTEX_HELD(&tq->tq_lock));
	ASSERT(tq->tq_flags & TASKQ_CHANGING);
	ASSERT(tq->tq_nthreads < tq->tq_nthreads_target);
	ASSERT(!(tq->tq_flags & TASKQ_THREAD_CREATED));

	tq->tq_flags |= TASKQ_THREAD_CREATED;
	tq->tq_active++;
	mutex_exit(&tq->tq_lock);

	/*
	 * With TASKQ_DUTY_CYCLE the new thread must have an LWP
	 * as explained in ../disp/sysdc.c (for the msacct data).
	 * Otherwise simple kthreads are preferred.
	 */
	if ((tq->tq_flags & TASKQ_DUTY_CYCLE) != 0) {
		/* Enforced in taskq_create_common */
		ASSERT3P(tq->tq_proc, !=, &p0);
		t = lwp_kernel_create(tq->tq_proc, taskq_thread, tq, TS_RUN,
		    tq->tq_pri);
	} else {
		t = thread_create(NULL, 0, taskq_thread, tq, 0, tq->tq_proc,
		    TS_RUN, tq->tq_pri);
	}

	if (!first) {
		mutex_enter(&tq->tq_lock);
		return;
	}

	/*
	 * We know the thread cannot go away, since tq cannot be
	 * destroyed until creation has completed. We can therefore
	 * safely dereference t.
	 */
	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
		taskq_cpupct_install(tq, t->t_cpupart);
	}
	mutex_enter(&tq->tq_lock);

	/* Wait until we can service requests. */
	while (tq->tq_nthreads != tq->tq_nthreads_target &&
	    tq->tq_nthreads < TASKQ_CREATE_ACTIVE_THREADS) {
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	}
}
/*
 * Common "sleep taskq thread" function, which handles CPR stuff, as well
 * as giving a nice common point for debuggers to find inactive threads.
 */
static clock_t
taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv,
    callb_cpr_t *cprinfo, clock_t timeout)
{
	clock_t ret = 0;

	if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
		CALLB_CPR_SAFE_BEGIN(cprinfo);
	}
	if (timeout < 0)
		cv_wait(cv, mx);
	else
		ret = cv_reltimedwait(cv, mx, timeout, TR_CLOCK_TICK);

	if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
		CALLB_CPR_SAFE_END(cprinfo, mx);
	}

	return (ret);
}
/*
 * Worker thread for processing task queue.
 */
static void
taskq_thread(void *arg)
{
	int thread_id;

	taskq_t *tq = arg;
	taskq_ent_t *tqe;
	callb_cpr_t cprinfo;
	hrtime_t start, end;
	boolean_t freeit;

	curthread->t_taskq = tq;	/* mark ourselves for taskq_member() */

	if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) {
		sysdc_thread_enter(curthread, tq->tq_DC,
		    (tq->tq_flags & TASKQ_DC_BATCH) ? SYSDC_THREAD_BATCH : 0);
	}

	if (tq->tq_flags & TASKQ_CPR_SAFE) {
		CALLB_CPR_INIT_SAFE(curthread, tq->tq_name);
	} else {
		CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr,
		    tq->tq_name);
	}
	mutex_enter(&tq->tq_lock);
	thread_id = ++tq->tq_nthreads;
	ASSERT(tq->tq_flags & TASKQ_THREAD_CREATED);
	ASSERT(tq->tq_flags & TASKQ_CHANGING);
	tq->tq_flags &= ~TASKQ_THREAD_CREATED;

	VERIFY3S(thread_id, <=, tq->tq_nthreads_max);

	if (tq->tq_nthreads_max == 1)
		tq->tq_thread = curthread;
	else
		tq->tq_threadlist[thread_id - 1] = curthread;

	/* Allow taskq_create_common()'s taskq_thread_create() to return. */
	if (tq->tq_nthreads == TASKQ_CREATE_ACTIVE_THREADS)
		cv_broadcast(&tq->tq_wait_cv);

	for (;;) {
		if (tq->tq_flags & TASKQ_CHANGING) {
			/* See if we're no longer needed */
			if (thread_id > tq->tq_nthreads_target) {
				/*
				 * To preserve the one-to-one mapping between
				 * thread_id and thread, we must exit from
				 * highest thread ID to least.
				 *
				 * However, if everyone is exiting, the order
				 * doesn't matter, so just exit immediately.
				 * (this is safe, since you must wait for
				 * nthreads to reach 0 after setting
				 * tq_nthreads_target to 0)
				 */
				if (thread_id == tq->tq_nthreads ||
				    tq->tq_nthreads_target == 0)
					break;

				/* Wait for higher thread_ids to exit */
				(void) taskq_thread_wait(tq, &tq->tq_lock,
				    &tq->tq_exit_cv, &cprinfo, -1);
				continue;
			}

			/*
			 * If no thread is starting taskq_thread(), we can
			 * do some bookkeeping.
			 */
			if (!(tq->tq_flags & TASKQ_THREAD_CREATED)) {
				/* Check if we've reached our target */
				if (tq->tq_nthreads == tq->tq_nthreads_target) {
					tq->tq_flags &= ~TASKQ_CHANGING;
					cv_broadcast(&tq->tq_wait_cv);
				}
				/* Check if we need to create a thread */
				if (tq->tq_nthreads < tq->tq_nthreads_target) {
					taskq_thread_create(tq);
					continue;	/* tq_lock was dropped */
				}
			}
		}
		if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			(void) taskq_thread_wait(tq, &tq->tq_lock,
			    &tq->tq_dispatch_cv, &cprinfo, -1);
			tq->tq_active++;
			continue;
		}

		tqe->tqent_prev->tqent_next = tqe->tqent_next;
		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
		mutex_exit(&tq->tq_lock);

		/*
		 * For prealloc'd tasks, we don't free anything. We
		 * have to check this now, because once we call the
		 * function for a prealloc'd taskq, we can't touch the
		 * tqent any longer (calling the function returns the
		 * ownership of the tqent back to the caller of
		 * taskq_dispatch.)
		 */
		if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
		    (tqe->tqent_un.tqent_flags & TQENT_FLAG_PREALLOC)) {
			/* clear pointers to assist assertion checks */
			tqe->tqent_next = tqe->tqent_prev = NULL;
			freeit = B_FALSE;
		} else {
			freeit = B_TRUE;
		}

		rw_enter(&tq->tq_threadlock, RW_READER);
		start = gethrtime();
		DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		tqe->tqent_func(tqe->tqent_arg);
		DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		end = gethrtime();
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		tq->tq_totaltime += end - start;
		tq->tq_executed++;

		if (freeit)
			taskq_ent_free(tq, tqe);
	}

	if (tq->tq_nthreads_max == 1)
		tq->tq_thread = NULL;
	else
		tq->tq_threadlist[thread_id - 1] = NULL;

	/* We're exiting, and therefore no longer active */
	ASSERT(tq->tq_active > 0);
	tq->tq_active--;

	ASSERT(tq->tq_nthreads > 0);
	tq->tq_nthreads--;

	/* Wake up anyone waiting for us to exit */
	cv_broadcast(&tq->tq_exit_cv);
	if (tq->tq_nthreads == tq->tq_nthreads_target) {
		if (!(tq->tq_flags & TASKQ_THREAD_CREATED))
			tq->tq_flags &= ~TASKQ_CHANGING;

		cv_broadcast(&tq->tq_wait_cv);
	}

	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
	CALLB_CPR_EXIT(&cprinfo);	/* drops tq->tq_lock */
	if (curthread->t_lwp != NULL) {
		mutex_enter(&curproc->p_lock);
		lwp_exit();
	} else {
		thread_exit();
	}
}
/*
 * Worker per-entry thread for dynamic dispatches.
 */
static void
taskq_d_thread(taskq_ent_t *tqe)
{
	taskq_bucket_t	*bucket = tqe->tqent_un.tqent_bucket;
	taskq_t		*tq = bucket->tqbucket_taskq;
	kmutex_t	*lock = &bucket->tqbucket_lock;
	kcondvar_t	*cv = &tqe->tqent_cv;
	callb_cpr_t	cprinfo;
	clock_t		w;

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, tq->tq_name);

	mutex_enter(lock);

	for (;;) {
		/*
		 * If a task is scheduled (func != NULL), execute it, otherwise
		 * sleep, waiting for a job.
		 */
		if (tqe->tqent_func != NULL) {
			hrtime_t	start;
			hrtime_t	end;

			ASSERT(bucket->tqbucket_nalloc > 0);

			/*
			 * It is possible to free the entry right away before
			 * actually executing the task so that subsequent
			 * dispatches may immediately reuse it. But this,
			 * effectively, creates a two-length queue in the entry
			 * and may lead to a deadlock if the execution of the
			 * current task depends on the execution of the next
			 * scheduled task. So, we keep the entry busy until the
			 * task is processed.
			 */

			mutex_exit(lock);
			start = gethrtime();
			DTRACE_PROBE3(taskq__d__exec__start, taskq_t *, tq,
			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
			tqe->tqent_func(tqe->tqent_arg);
			DTRACE_PROBE3(taskq__d__exec__end, taskq_t *, tq,
			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
			end = gethrtime();
			mutex_enter(lock);
			bucket->tqbucket_totaltime += end - start;

			/*
			 * Return the entry to the bucket free list.
			 */
			tqe->tqent_func = NULL;
			TQ_APPEND(bucket->tqbucket_freelist, tqe);
			bucket->tqbucket_nalloc--;
			bucket->tqbucket_nfree++;
			ASSERT(!IS_EMPTY(bucket->tqbucket_freelist));
			/*
			 * taskq_wait() waits for nalloc to drop to zero on
			 * tqbucket_cv.
			 */
			cv_signal(&bucket->tqbucket_cv);
		}

		/*
		 * At this point the entry must be in the bucket free list -
		 * either because it was there initially or because it just
		 * finished executing a task and put itself on the free list.
		 */
		ASSERT(bucket->tqbucket_nfree > 0);
		/*
		 * Go to sleep unless we are closing.
		 * If a thread is sleeping too long, it dies.
		 */
		if (! (bucket->tqbucket_flags & TQBUCKET_CLOSE)) {
			w = taskq_thread_wait(tq, lock, cv,
			    &cprinfo, taskq_thread_timeout * hz);
		}

		/*
		 * At this point we may be in two different states:
		 *
		 * (1) tqent_func is set which means that a new task is
		 *     dispatched and we need to execute it.
		 *
		 * (2) Thread is sleeping for too long or we are closing. In
		 *     both cases destroy the thread and the entry.
		 */

		/* If func is NULL we should be on the freelist. */
		ASSERT((tqe->tqent_func != NULL) ||
		    (bucket->tqbucket_nfree > 0));
		/* If func is non-NULL we should be allocated */
		ASSERT((tqe->tqent_func == NULL) ||
		    (bucket->tqbucket_nalloc > 0));

		/* Check freelist consistency */
		ASSERT((bucket->tqbucket_nfree > 0) ||
		    IS_EMPTY(bucket->tqbucket_freelist));
		ASSERT((bucket->tqbucket_nfree == 0) ||
		    !IS_EMPTY(bucket->tqbucket_freelist));

		if ((tqe->tqent_func == NULL) &&
		    ((w == -1) || (bucket->tqbucket_flags & TQBUCKET_CLOSE))) {
			/*
			 * This thread is sleeping for too long or we are
			 * closing - time to die.
			 * Thread creation/destruction happens rarely,
			 * so grabbing the lock is not a big performance issue.
			 * The bucket lock is dropped by CALLB_CPR_EXIT().
			 */

			/* Remove the entry from the free list. */
			tqe->tqent_prev->tqent_next = tqe->tqent_next;
			tqe->tqent_next->tqent_prev = tqe->tqent_prev;
			ASSERT(bucket->tqbucket_nfree > 0);
			bucket->tqbucket_nfree--;

			TQ_STAT(bucket, tqs_tdeaths);
			cv_signal(&bucket->tqbucket_cv);
			tqe->tqent_thread = NULL;
			mutex_enter(&tq->tq_lock);
			tq->tq_tdeaths++;
			mutex_exit(&tq->tq_lock);
			CALLB_CPR_EXIT(&cprinfo);	/* drops bucket lock */
			kmem_cache_free(taskq_ent_cache, tqe);
			thread_exit();
		}
	}
}
/*
 * Taskq creation. May sleep for memory.
 * Always use automatically generated instances to avoid kstat name space
 * collisions.
 */
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);

	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, &p0, 0, flags | TASKQ_NOINSTANCE));
}
/*
 * Create an instance of task queue. It is legal to create task queues with the
 * same name and different instances.
 *
 * taskq_create_instance is used by ddi_taskq_create() where it gets the
 * instance from ddi_get_instance(). In some cases the instance is not
 * initialized and is set to -1. This case is handled as if no instance was
 * passed at all.
 */
taskq_t *
taskq_create_instance(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT((instance >= 0) || (instance == -1));

	if (instance < 0) {
		flags |= TASKQ_NOINSTANCE;
	}

	return (taskq_create_common(name, instance, nthreads,
	    pri, minalloc, maxalloc, &p0, 0, flags));
}
taskq_t *
taskq_create_proc(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, proc_t *proc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT(proc->p_flag & SSYS);

	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, proc, 0, flags | TASKQ_NOINSTANCE));
}
taskq_t *
taskq_create_sysdc(const char *name, int nthreads, int minalloc,
    int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT(proc->p_flag & SSYS);

	return (taskq_create_common(name, 0, nthreads, minclsyspri, minalloc,
	    maxalloc, proc, dc, flags | TASKQ_NOINSTANCE | TASKQ_DUTY_CYCLE));
}

static taskq_t *
taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
{
	taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
	uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
	uint_t bsize;	/* # of buckets - always power of 2 */
	int max_nthreads;

	/*
	 * TASKQ_DYNAMIC, TASKQ_CPR_SAFE and TASKQ_THREADS_CPU_PCT are all
	 * mutually incompatible.
	 */
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_CPR_SAFE));
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_THREADS_CPU_PCT));
	IMPLY((flags & TASKQ_CPR_SAFE), !(flags & TASKQ_THREADS_CPU_PCT));

	/* Cannot have DYNAMIC with DUTY_CYCLE */
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_DUTY_CYCLE));

	/* Cannot have DUTY_CYCLE with a p0 kernel process */
	IMPLY((flags & TASKQ_DUTY_CYCLE), proc != &p0);

	/* Cannot have DC_BATCH without DUTY_CYCLE */
	ASSERT((flags & (TASKQ_DUTY_CYCLE|TASKQ_DC_BATCH)) != TASKQ_DC_BATCH);

	ASSERT(proc != NULL);

	bsize = 1 << (highbit(ncpus) - 1);
	ASSERT(bsize >= 1);
	bsize = MIN(bsize, taskq_maxbuckets);
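
	/*
	 * Worked example (illustrative): highbit() returns the position of
	 * the highest set bit, so with ncpus == 6, highbit(6) == 3 and
	 * bsize == 1 << 2 == 4 -- the largest power of 2 not exceeding
	 * ncpus, clamped to taskq_maxbuckets above.
	 */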

	if (flags & TASKQ_DYNAMIC) {
		ASSERT3S(nthreads, >=, 1);
		tq->tq_maxsize = nthreads;

		/* For dynamic task queues use just one backup thread */
		nthreads = max_nthreads = 1;

	} else if (flags & TASKQ_THREADS_CPU_PCT) {
		uint_t pct;
		ASSERT3S(nthreads, >=, 0);
		pct = nthreads;

		if (pct > taskq_cpupct_max_percent)
			pct = taskq_cpupct_max_percent;

		/*
		 * If you're using THREADS_CPU_PCT, the process for the
		 * taskq threads must be curproc. This allows any pset
		 * binding to be inherited correctly. If proc is &p0,
		 * we won't be creating LWPs, so new threads will be assigned
		 * to the default processor set.
		 */
		ASSERT(curproc == proc || proc == &p0);
		tq->tq_threads_ncpus_pct = pct;
		nthreads = 1;	/* corrected in taskq_thread_create() */
		max_nthreads = TASKQ_THREADS_PCT(max_ncpus, pct);
	} else {
		ASSERT3S(nthreads, >=, 1);
		max_nthreads = nthreads;
	}

	if (max_nthreads < taskq_minimum_nthreads_max)
		max_nthreads = taskq_minimum_nthreads_max;
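
	/*
	 * Worked example (illustrative): with TASKQ_THREADS_CPU_PCT and
	 * nthreads == 50 (i.e. 50%), a system with max_ncpus == 8 gets
	 * max_nthreads = TASKQ_THREADS_PCT(8, 50) -- about 4 threads --
	 * subject to the taskq_minimum_nthreads_max floor applied above.
	 */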

	/*
	 * Make sure the name is 0-terminated, and conforms to the rules for
	 * C identifiers.
	 */
	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
	strident_canon(tq->tq_name, TASKQ_NAMELEN + 1);

	tq->tq_flags = flags | TASKQ_CHANGING;
	tq->tq_active = 0;
	tq->tq_instance = instance;
	tq->tq_nthreads_target = nthreads;
	tq->tq_nthreads_max = max_nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nbuckets = bsize;
	tq->tq_proc = proc;
	tq->tq_pri = pri;
	tq->tq_DC = dc;
	list_link_init(&tq->tq_cpupct_link);

	if (max_nthreads > 1)
		tq->tq_threadlist = kmem_alloc(
		    sizeof (kthread_t *) * max_nthreads, KM_SLEEP);

	mutex_enter(&tq->tq_lock);
	if (flags & TASKQ_PREPOPULATE) {
		while (minalloc-- > 0)
			taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
	}

	/*
	 * Before we start creating threads for this taskq, take a
	 * zone hold so the zone can't go away before taskq_destroy
	 * makes sure all the taskq threads are gone. This hold is
	 * similar in purpose to those taken by zthread_create().
	 */
	zone_hold(tq->tq_proc->p_zone);

	/*
	 * Create the first thread, which will create any other threads
	 * necessary. taskq_thread_create will not return until we have
	 * enough threads to be able to process requests.
	 */
	taskq_thread_create(tq);
	mutex_exit(&tq->tq_lock);

	if (flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *bucket = kmem_zalloc(sizeof (taskq_bucket_t) *
		    bsize, KM_SLEEP);
		int b_id;

		tq->tq_buckets = bucket;

		/* Initialize each bucket */
		for (b_id = 0; b_id < bsize; b_id++, bucket++) {
			mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
			    NULL);
			cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
			bucket->tqbucket_taskq = tq;
			bucket->tqbucket_freelist.tqent_next =
			    bucket->tqbucket_freelist.tqent_prev =
			    &bucket->tqbucket_freelist;
			if (flags & TASKQ_PREPOPULATE)
				taskq_bucket_extend(bucket);
		}
	}

	/*
	 * Install kstats. There are two cases:
	 *   1) Instance is provided to taskq_create_instance(). In this case
	 *	it should be >= 0 and we use it.
	 *
	 *   2) Instance is not provided and is automatically generated.
	 */
	if (flags & TASKQ_NOINSTANCE) {
		instance = tq->tq_instance =
		    (int)(uintptr_t)vmem_alloc(taskq_id_arena, 1, VM_SLEEP);
	}

	if (flags & TASKQ_DYNAMIC) {
		if ((tq->tq_kstat = kstat_create("unix", instance,
		    tq->tq_name, "taskq_d", KSTAT_TYPE_NAMED,
		    sizeof (taskq_d_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			tq->tq_kstat->ks_lock = &taskq_d_kstat_lock;
			tq->tq_kstat->ks_data = &taskq_d_kstat;
			tq->tq_kstat->ks_update = taskq_d_kstat_update;
			tq->tq_kstat->ks_private = tq;
			kstat_install(tq->tq_kstat);
		}
	} else {
		if ((tq->tq_kstat = kstat_create("unix", instance, tq->tq_name,
		    "taskq", KSTAT_TYPE_NAMED,
		    sizeof (taskq_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			tq->tq_kstat->ks_lock = &taskq_kstat_lock;
			tq->tq_kstat->ks_data = &taskq_kstat;
			tq->tq_kstat->ks_update = taskq_kstat_update;
			tq->tq_kstat->ks_private = tq;
			kstat_install(tq->tq_kstat);
		}
	}

	return (tq);
}

/*
 * taskq_destroy().
 *
 * Assumes: by the time taskq_destroy is called no one will use this task queue
 * in any way and no one will try to dispatch entries in it.
 */
void
taskq_destroy(taskq_t *tq)
{
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	ASSERT(! (tq->tq_flags & TASKQ_CPR_SAFE));

	/*
	 * Destroy kstats.
	 */
	if (tq->tq_kstat != NULL) {
		kstat_delete(tq->tq_kstat);
		tq->tq_kstat = NULL;
	}

	/*
	 * Destroy instance if needed.
	 */
	if (tq->tq_flags & TASKQ_NOINSTANCE) {
		vmem_free(taskq_id_arena, (void *)(uintptr_t)(tq->tq_instance),
		    1);
		tq->tq_instance = 0;
	}

	/*
	 * Unregister from the cpupct list.
	 */
	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
		taskq_cpupct_remove(tq);
	}

	/*
	 * Wait for any pending entries to complete.
	 */
	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);
	ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
	    (tq->tq_active == 0));

	/* notify all the threads that they need to exit */
	tq->tq_nthreads_target = 0;

	tq->tq_flags |= TASKQ_CHANGING;
	cv_broadcast(&tq->tq_dispatch_cv);
	cv_broadcast(&tq->tq_exit_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	if (tq->tq_nthreads_max != 1)
		kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
		    tq->tq_nthreads_max);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0)
		taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));

	mutex_exit(&tq->tq_lock);

	/*
	 * Mark each bucket as closing and wakeup all sleeping threads.
	 */
	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		taskq_ent_t *tqe;

		mutex_enter(&b->tqbucket_lock);

		b->tqbucket_flags |= TQBUCKET_CLOSE;
		/* Wakeup all sleeping threads */
		for (tqe = b->tqbucket_freelist.tqent_next;
		    tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
			cv_signal(&tqe->tqent_cv);

		ASSERT(b->tqbucket_nalloc == 0);

		/*
		 * At this point we waited for all pending jobs to complete
		 * (in both the task queue and the bucket) and no new jobs
		 * should arrive. Wait for all threads to die.
		 */
		while (b->tqbucket_nfree > 0)
			cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
		mutex_exit(&b->tqbucket_lock);
		mutex_destroy(&b->tqbucket_lock);
		cv_destroy(&b->tqbucket_cv);
	}

	if (tq->tq_buckets != NULL) {
		ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
		kmem_free(tq->tq_buckets,
		    sizeof (taskq_bucket_t) * tq->tq_nbuckets);

		/* Cleanup fields before returning tq to the cache */
		tq->tq_buckets = NULL;
		tq->tq_tcreates = 0;
		tq->tq_tdeaths = 0;
	} else {
		ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
	}

	/*
	 * Now that all the taskq threads are gone, we can
	 * drop the zone hold taken in taskq_create_common.
	 */
	zone_rele(tq->tq_proc->p_zone);

	tq->tq_threads_ncpus_pct = 0;
	tq->tq_totaltime = 0;
	tq->tq_tasks = 0;
	tq->tq_maxtasks = 0;
	tq->tq_executed = 0;
	kmem_cache_free(taskq_cache, tq);
}
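
/*
 * Teardown ordering sketch (illustrative; sc and its fields are
 * hypothetical driver state). The caller must guarantee that no new
 * dispatches can occur before calling taskq_destroy():
 *
 *	sc->sc_dying = B_TRUE;		(stop issuing taskq_dispatch())
 *	taskq_wait(sc->sc_tq);		(optional; destroy also drains)
 *	taskq_destroy(sc->sc_tq);
 *	sc->sc_tq = NULL;
 */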

/*
 * Extend a bucket with a new entry on the free list and attach a worker
 * thread to it.
 *
 * Argument: pointer to the bucket.
 *
 * This function may quietly fail. It is only used by taskq_dispatch(), which
 * handles such failures properly.
 */
static void
taskq_bucket_extend(void *arg)
{
	taskq_ent_t *tqe;
	taskq_bucket_t *b = (taskq_bucket_t *)arg;
	taskq_t *tq = b->tqbucket_taskq;
	int nthreads;

	mutex_enter(&tq->tq_lock);

	if (! ENOUGH_MEMORY()) {
		tq->tq_nomem++;
		mutex_exit(&tq->tq_lock);
		return;
	}

	/*
	 * Observe global taskq limits on the number of threads.
	 */
	if (tq->tq_tcreates++ - tq->tq_tdeaths > tq->tq_maxsize) {
		tq->tq_tcreates--;
		mutex_exit(&tq->tq_lock);
		return;
	}
	mutex_exit(&tq->tq_lock);

	tqe = kmem_cache_alloc(taskq_ent_cache, KM_NOSLEEP);

	if (tqe == NULL) {
		mutex_enter(&tq->tq_lock);
		tq->tq_nomem++;
		tq->tq_tcreates--;
		mutex_exit(&tq->tq_lock);
		return;
	}

	ASSERT(tqe->tqent_thread == NULL);

	tqe->tqent_un.tqent_bucket = b;

	/*
	 * Create a thread in a TS_STOPPED state first. If it is successfully
	 * created, place the entry on the free list and start the thread.
	 */
	tqe->tqent_thread = thread_create(NULL, 0, taskq_d_thread, tqe,
	    0, tq->tq_proc, TS_STOPPED, tq->tq_pri);

	/*
	 * Once the entry is ready, link it to the bucket free list.
	 */
	mutex_enter(&b->tqbucket_lock);
	tqe->tqent_func = NULL;
	TQ_APPEND(b->tqbucket_freelist, tqe);
	b->tqbucket_nfree++;
	TQ_STAT(b, tqs_tcreates);

#if TASKQ_STATISTIC
	nthreads = b->tqbucket_stat.tqs_tcreates -
	    b->tqbucket_stat.tqs_tdeaths;
	b->tqbucket_stat.tqs_maxthreads = MAX(nthreads,
	    b->tqbucket_stat.tqs_maxthreads);
#endif

	mutex_exit(&b->tqbucket_lock);

	/*
	 * Start the stopped thread.
	 */
	thread_lock(tqe->tqent_thread);
	tqe->tqent_thread->t_taskq = tq;
	tqe->tqent_thread->t_schedflag |= TS_ALLSTART;
	setrun_locked(tqe->tqent_thread);
	thread_unlock(tqe->tqent_thread);
}

static int
taskq_kstat_update(kstat_t *ksp, int rw)
{
	struct taskq_kstat *tqsp = &taskq_kstat;
	taskq_t *tq = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	tqsp->tq_pid.value.ui64 = tq->tq_proc->p_pid;
	tqsp->tq_tasks.value.ui64 = tq->tq_tasks;
	tqsp->tq_executed.value.ui64 = tq->tq_executed;
	tqsp->tq_maxtasks.value.ui64 = tq->tq_maxtasks;
	tqsp->tq_totaltime.value.ui64 = tq->tq_totaltime;
	tqsp->tq_nactive.value.ui64 = tq->tq_active;
	tqsp->tq_nalloc.value.ui64 = tq->tq_nalloc;
	tqsp->tq_pri.value.ui64 = tq->tq_pri;
	tqsp->tq_nthreads.value.ui64 = tq->tq_nthreads;
	tqsp->tq_nomem.value.ui64 = tq->tq_nomem;

	return (0);
}
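
/*
 * These counters surface through kstat(1M). For example, for a
 * (hypothetical) queue created with the name "my_taskq":
 *
 *	# kstat -m unix -n my_taskq
 *
 * reports statistics such as tasks, executed, maxtasks, totaltime and
 * nomem, filled in above.
 */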

static int
taskq_d_kstat_update(kstat_t *ksp, int rw)
{
	struct taskq_d_kstat *tqsp = &taskq_d_kstat;
	taskq_t *tq = ksp->ks_private;
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ASSERT(tq->tq_flags & TASKQ_DYNAMIC);

	tqsp->tqd_btasks.value.ui64 = tq->tq_tasks;
	tqsp->tqd_bexecuted.value.ui64 = tq->tq_executed;
	tqsp->tqd_bmaxtasks.value.ui64 = tq->tq_maxtasks;
	tqsp->tqd_bnalloc.value.ui64 = tq->tq_nalloc;
	tqsp->tqd_bnactive.value.ui64 = tq->tq_active;
	tqsp->tqd_btotaltime.value.ui64 = tq->tq_totaltime;
	tqsp->tqd_pri.value.ui64 = tq->tq_pri;
	tqsp->tqd_nomem.value.ui64 = tq->tq_nomem;

	tqsp->tqd_hits.value.ui64 = 0;
	tqsp->tqd_misses.value.ui64 = 0;
	tqsp->tqd_overflows.value.ui64 = 0;
	tqsp->tqd_tcreates.value.ui64 = 0;
	tqsp->tqd_tdeaths.value.ui64 = 0;
	tqsp->tqd_maxthreads.value.ui64 = 0;
	tqsp->tqd_disptcreates.value.ui64 = 0;
	tqsp->tqd_totaltime.value.ui64 = 0;
	tqsp->tqd_nalloc.value.ui64 = 0;
	tqsp->tqd_nfree.value.ui64 = 0;

	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		tqsp->tqd_hits.value.ui64 += b->tqbucket_stat.tqs_hits;
		tqsp->tqd_misses.value.ui64 += b->tqbucket_stat.tqs_misses;
		tqsp->tqd_overflows.value.ui64 += b->tqbucket_stat.tqs_overflow;
		tqsp->tqd_tcreates.value.ui64 += b->tqbucket_stat.tqs_tcreates;
		tqsp->tqd_tdeaths.value.ui64 += b->tqbucket_stat.tqs_tdeaths;
		tqsp->tqd_maxthreads.value.ui64 +=
		    b->tqbucket_stat.tqs_maxthreads;
		tqsp->tqd_disptcreates.value.ui64 +=
		    b->tqbucket_stat.tqs_disptcreates;
		tqsp->tqd_totaltime.value.ui64 += b->tqbucket_totaltime;
		tqsp->tqd_nalloc.value.ui64 += b->tqbucket_nalloc;
		tqsp->tqd_nfree.value.ui64 += b->tqbucket_nfree;