[unleashed.git] / kernel / os / taskq.c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
27 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
28 * Copyright (c) 2017 by Delphix. All rights reserved.
29 * Copyright 2018, Joyent, Inc.
33 * Kernel task queues: general-purpose asynchronous task scheduling.
35 * A common problem in kernel programming is the need to schedule tasks
36 * to be performed later, by another thread. There are several reasons
37 * you may want or need to do this:
39 * (1) The task isn't time-critical, but your current code path is.
41 * (2) The task may require grabbing locks that you already hold.
43 * (3) The task may need to block (e.g. to wait for memory), but you
44 * cannot block in your current context.
46 * (4) Your code path can't complete because of some condition, but you can't
47 * sleep or fail, so you queue the task for later execution when the
48 * condition disappears.
50 * (5) You just want a simple way to launch multiple tasks in parallel.
52 * Task queues provide such a facility. In its simplest form (used when
53 * performance is not a critical consideration) a task queue consists of a
54 * single list of tasks, together with one or more threads to service the
55 * list. There are some cases when this simple queue is not sufficient:
57 * (1) The task queues are very hot and there is a need to avoid data and lock
58 * contention over global resources.
60 * (2) Some tasks may depend on other tasks to complete, so they can't be put in
61 * the same list managed by the same thread.
63 * (3) Some tasks may block for a long time, and this should not block other
64 * tasks in the queue.
66 * To provide useful service in such cases we define a "dynamic task queue"
67 * which has an individual thread for each of the tasks. These threads are
68 * dynamically created as they are needed and destroyed when they are not in
69 * use. The API for managing task pools is the same as for managing task queues
70 * with the exception of the taskq creation flag TASKQ_DYNAMIC, which indicates
71 * that dynamic task pool behavior is desired.
73 * Dynamic task queues may also place tasks in the normal queue (called "backing
74 * queue") when the task pool runs out of resources. Users of task queues may
75 * disallow such queued scheduling by specifying TQ_NOQUEUE in the dispatch
76 * flags.
78 * The backing task queue is also used for scheduling internal tasks needed for
79 * dynamic task queue maintenance.
81 * INTERFACES ==================================================================
83 * taskq_t *taskq_create(name, nthreads, pri, minalloc, maxalloc, flags);
85 * Create a taskq with specified properties.
86 * Possible 'flags':
88 * TASKQ_DYNAMIC: Create task pool for task management. If this flag is
89 * specified, 'nthreads' specifies the maximum number of threads in
90 * the task queue. Task execution order for dynamic task queues is
91 * not predictable.
93 * If this flag is not specified (default case) a
94 * single-list task queue is created with 'nthreads' threads
95 * servicing it. Entries in this queue are managed by
96 * taskq_ent_alloc() and taskq_ent_free() which try to keep the
97 * task population between 'minalloc' and 'maxalloc', but the
98 * latter limit is only advisory for TQ_SLEEP dispatches and the
99 * former limit is only advisory for TQ_NOALLOC dispatches. If
100 * TASKQ_PREPOPULATE is set in 'flags', the taskq will be
101 * prepopulated with 'minalloc' task structures.
103 * Since non-DYNAMIC taskqs are queues, tasks are guaranteed to be
104 * executed in the order they are scheduled if nthreads == 1.
105 * If nthreads > 1, task execution order is not predictable.
107 * TASKQ_PREPOPULATE: Prepopulate task queue with threads.
108 * Also prepopulate the task queue with 'minalloc' task structures.
110 * TASKQ_THREADS_CPU_PCT: This flag specifies that 'nthreads' should be
111 * interpreted as a percentage of the # of online CPUs on the
112 * system. The taskq subsystem will automatically adjust the
113 * number of threads in the taskq in response to CPU online
114 * and offline events, to keep the ratio. nthreads must be in
115 * the range [0,100].
117 * The calculation used is:
119 * MAX((ncpus_online * percentage)/100, 1)
121 * This flag is not supported for DYNAMIC task queues.
122 * This flag is not compatible with TASKQ_CPR_SAFE.
124 * TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
125 * use their own protocol for handling CPR issues. This flag is not
126 * supported for DYNAMIC task queues. This flag is not compatible
127 * with TASKQ_THREADS_CPU_PCT.
129 * The 'pri' field specifies the default priority for the threads that
130 * service all scheduled tasks.
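 *
 *	For example, a caller might create a prepopulated four-thread queue
 *	like this (illustrative sketch only; "my_tq" and "my_taskq" are
 *	placeholder names, not taken from this file):
 *
 *		taskq_t *my_tq;
 *
 *		my_tq = taskq_create("my_taskq", 4, minclsyspri,
 *		    2, 64, TASKQ_PREPOPULATE);
 *
 *	With TASKQ_THREADS_CPU_PCT, 'nthreads' is a percentage instead, so
 *	nthreads == 50 on an 8-CPU system yields MAX((8 * 50) / 100, 1) == 4
 *	service threads.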
132 * taskq_t *taskq_create_instance(name, instance, nthreads, pri, minalloc,
133 * maxalloc, flags);
135 * Like taskq_create(), but takes an instance number (or -1 to indicate
136 * no instance).
138 * taskq_t *taskq_create_proc(name, nthreads, pri, minalloc, maxalloc, proc,
139 * flags);
141 * Like taskq_create(), but creates the taskq threads in the specified
142 * system process. If proc != &p0, this must be called from a thread
143 * in that process.
145 * taskq_t *taskq_create_sysdc(name, nthreads, minalloc, maxalloc, proc,
146 * dc, flags);
148 * Like taskq_create_proc(), but the taskq threads will use the
149 * System Duty Cycle (SDC) scheduling class with a duty cycle of dc.
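 *
 *	A hypothetical sketch of the process-targeted variants ("myproc" and
 *	the duty cycle of 50 are placeholders; the proc must be a system
 *	process, and for proc != &p0 the taskq_create_proc() call must come
 *	from a thread in that process):
 *
 *		tq = taskq_create_proc("my_proc_tq", 2, minclsyspri,
 *		    2, 64, myproc, 0);
 *
 *		tq = taskq_create_sysdc("my_sdc_tq", 2, 2, 64, myproc,
 *		    50, 0);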
151 * void taskq_destroy(tap):
153 * Waits for any scheduled tasks to complete, then destroys the taskq.
154 * Caller should guarantee that no new tasks are scheduled in the closing
155 * taskq.
157 * taskqid_t taskq_dispatch(tq, func, arg, flags):
159 * Dispatches the task "func(arg)" to taskq. The 'flags' indicates whether
160 * the caller is willing to block for memory. The function returns an
161 * opaque value which is zero iff dispatch fails. If flags is TQ_NOSLEEP
162 * or TQ_NOALLOC and the task can't be dispatched, taskq_dispatch() fails
163 * and returns (taskqid_t)0.
165 * ASSUMES: func != NULL.
167 * Possible flags:
168 * TQ_NOSLEEP: Do not wait for resources; may fail.
170 * TQ_NOALLOC: Do not allocate memory; may fail. May only be used with
171 * non-dynamic task queues.
173 * TQ_NOQUEUE: Do not enqueue the task if it can't be dispatched due
174 * to lack of available resources; fail instead. If this flag is not
175 * set, and the task pool is exhausted, the task may be scheduled
176 * in the backing queue. This flag may ONLY be used with dynamic
177 * task queues.
179 * NOTE: This flag should always be used when a task queue is used
180 * for tasks that may depend on each other for completion.
181 * Enqueueing dependent tasks may create deadlocks.
183 * TQ_SLEEP: May block waiting for resources. May still fail for
184 * dynamic task queues if TQ_NOQUEUE is also specified; otherwise
185 * it always succeeds.
187 * TQ_FRONT: Puts the new task at the front of the queue. Be careful.
189 * NOTE: Dynamic task queues are much more likely to fail in
190 * taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
191 * is important to have backup strategies handling such failures.
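 *
 *	For instance, a hypothetical caller using TQ_NOSLEEP might fall back
 *	to running the task inline when dispatch fails (sketch only; my_tq,
 *	my_func and my_arg are placeholders, not names from this file):
 *
 *		if (taskq_dispatch(my_tq, my_func, my_arg, TQ_NOSLEEP) ==
 *		    (taskqid_t)0)
 *			my_func(my_arg);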
193 * void taskq_dispatch_ent(tq, func, arg, flags, tqent)
195 * This is a light-weight form of taskq_dispatch(), that uses a
196 * preallocated taskq_ent_t structure for scheduling. As a
197 * result, it does not perform allocations and cannot ever fail.
198 * Note especially that it cannot be used with TASKQ_DYNAMIC
199 * taskqs. The memory for the tqent must not be modified or used
200 * until the function (func) is called. (However, func itself
201 * may safely modify or free this memory, once it is called.)
202 * Note that the taskq framework will NOT free this memory.
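 *
 *	A hypothetical caller might embed the taskq_ent_t in its own object so
 *	that dispatch never allocates (sketch only; my_obj_t, mo_tqent and
 *	my_func are placeholders, and mo_tqent should start out zeroed and
 *	stay untouched until my_func runs):
 *
 *		typedef struct my_obj {
 *			taskq_ent_t	mo_tqent;
 *			...
 *		} my_obj_t;
 *
 *		taskq_dispatch_ent(my_tq, my_func, obj, 0, &obj->mo_tqent);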
204 * boolean_t taskq_empty(tq)
206 * Returns B_TRUE if no tasks are pending or executing, B_FALSE otherwise.
208 * void taskq_wait(tq):
210 * Waits for all previously scheduled tasks to complete.
212 * NOTE: It does not stop any new task dispatches.
213 * Do NOT call taskq_wait() from a task: it will cause deadlock.
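 *
 *	In a teardown path, for example, a caller typically stops issuing new
 *	dispatches first, then drains and destroys the queue (sketch only;
 *	my_tq is a placeholder):
 *
 *		taskq_wait(my_tq);
 *		taskq_destroy(my_tq);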
215 * void taskq_suspend(tq)
217 * Suspend all task execution. Tasks already scheduled for a dynamic task
218 * queue will still be executed, but all newly scheduled tasks will be
219 * suspended until taskq_resume() is called.
221 * int taskq_suspended(tq)
223 * Returns 1 if taskq is suspended and 0 otherwise. It is intended to
224 * ASSERT that the task queue is suspended.
226 * void taskq_resume(tq)
228 * Resume task queue execution.
230 * int taskq_member(tq, thread)
232 * Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
233 * intended use is to ASSERT that a given function is called in taskq
234 * context only.
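 *
 *	For example, a task function that must only run from its taskq might
 *	assert this (sketch only; my_tq is a placeholder):
 *
 *		void
 *		my_task(void *arg)
 *		{
 *			ASSERT(taskq_member(my_tq, curthread));
 *			...
 *		}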
236 * system_taskq
238 * Global system-wide dynamic task queue for common uses. It may be used by
239 * any subsystem that needs to schedule tasks and does not need to manage
240 * its own task queues. It is initialized quite early during system boot.
242 * IMPLEMENTATION ==============================================================
244 * This is a schematic representation of the task queue structures.
246 * taskq:
247 * +-------------+
248 * | tq_lock | +---< taskq_ent_free()
249 * +-------------+ |
250 * |... | | tqent: tqent:
251 * +-------------+ | +------------+ +------------+
252 * | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
253 * +-------------+ +------------+ +------------+
254 * |... | | ... | | ... |
255 * +-------------+ +------------+ +------------+
256 * | tq_task | |
257 * | | +-------------->taskq_ent_alloc()
258 * +--------------------------------------------------------------------------+
259 * | | | tqent tqent |
260 * | +---------------------+ +--> +------------+ +--> +------------+ |
261 * | | ... | | | func, arg | | | func, arg | |
262 * +>+---------------------+ <---|-+ +------------+ <---|-+ +------------+ |
263 * | tq_taskq.tqent_next | ----+ | | tqent_next | --->+ | | tqent_next |--+
264 * +---------------------+ | +------------+ ^ | +------------+
265 * +-| tq_task.tqent_prev | +--| tqent_prev | | +--| tqent_prev | ^
266 * | +---------------------+ +------------+ | +------------+ |
267 * | |... | | ... | | | ... | |
268 * | +---------------------+ +------------+ | +------------+ |
269 * | ^ | |
270 * | | | |
271 * +--------------------------------------+--------------+ TQ_APPEND() -+
272 * | | |
273 * |... | taskq_thread()-----+
274 * +-------------+
275 * | tq_buckets |--+-------> [ NULL ] (for regular task queues)
276 * +-------------+ |
277 * | DYNAMIC TASK QUEUES:
279 * +-> taskq_bucket[nCPU] taskq_bucket_dispatch()
280 * +-------------------+ ^
281 * +--->| tqbucket_lock | |
282 * | +-------------------+ +--------+ +--------+
283 * | | tqbucket_freelist |-->| tqent |-->...| tqent | ^
284 * | +-------------------+<--+--------+<--...+--------+ |
285 * | | ... | | thread | | thread | |
286 * | +-------------------+ +--------+ +--------+ |
287 * | +-------------------+ |
288 * taskq_dispatch()--+--->| tqbucket_lock | TQ_APPEND()------+
289 * TQ_HASH() | +-------------------+ +--------+ +--------+
290 * | | tqbucket_freelist |-->| tqent |-->...| tqent |
291 * | +-------------------+<--+--------+<--...+--------+
292 * | | ... | | thread | | thread |
293 * | +-------------------+ +--------+ +--------+
294 * +---> ...
297 * Task queues use the tq_task field to link new entries into the queue. The
298 * queue is a circular doubly-linked list. Entries are put at the end of the list with
299 * TQ_APPEND() and processed from the front of the list by taskq_thread() in
300 * FIFO order. Task queue entries are cached in the free list managed by
301 * taskq_ent_alloc() and taskq_ent_free() functions.
303 * All threads used by task queues set the t_taskq field of the thread to
304 * point to the task queue.
306 * Taskq Thread Management -----------------------------------------------------
308 * Taskq's non-dynamic threads are managed with several variables and flags:
310 * * tq_nthreads - The number of threads in taskq_thread() for the
311 * taskq.
313 * * tq_active - The number of threads not waiting on a CV in
314 * taskq_thread(); includes newly created threads
315 * not yet counted in tq_nthreads.
317 * * tq_nthreads_target
318 * - The number of threads desired for the taskq.
320 * * tq_flags & TASKQ_CHANGING
321 * - Indicates that tq_nthreads != tq_nthreads_target.
323 * * tq_flags & TASKQ_THREAD_CREATED
324 * - Indicates that a thread is being created in the taskq.
326 * During creation, tq_nthreads and tq_active are set to 0, and
327 * tq_nthreads_target is set to the number of threads desired. The
328 * TASKQ_CHANGING flag is set, and taskq_thread_create() is called to
329 * create the first thread. taskq_thread_create() increments tq_active,
330 * sets TASKQ_THREAD_CREATED, and creates the new thread.
332 * Each thread starts in taskq_thread(), clears the TASKQ_THREAD_CREATED
333 * flag, and increments tq_nthreads. It stores the new value of
334 * tq_nthreads as its "thread_id", and stores its thread pointer in the
335 * tq_threadlist at index (thread_id - 1). We keep the thread_id space
336 * densely packed by requiring that only the largest thread_id can exit during
337 * normal adjustment. The exception is during the destruction of the
338 * taskq; once tq_nthreads_target is set to zero, no new threads will be created
339 * for the taskq, so every thread can exit without any ordering being
340 * necessary.
342 * Threads will only process work if their thread id is <= tq_nthreads_target.
344 * When TASKQ_CHANGING is set, threads will check the current thread target
345 * whenever they wake up, and do whatever they can to apply its effects.
347 * TASKQ_THREADS_CPU_PCT -------------------------------------------------------
349 * When a taskq is created with TASKQ_THREADS_CPU_PCT, we store the requested
350 * percentage in tq_threads_ncpus_pct, start it off with the correct thread
351 * target, and add it to the taskq_cpupct_list for later adjustment.
353 * We register taskq_cpu_setup() to be called whenever a CPU changes state. It
354 * walks the list of TASKQ_THREADS_CPU_PCT taskqs, adjusts their tq_nthreads_target
355 * if need be, and wakes up all of the threads to process the change.
357 * Dynamic Task Queues Implementation ------------------------------------------
359 * For dynamic task queues there is a 1-to-1 mapping between a thread and a
360 * taskq_ent_t structure. Each entry is serviced by its own thread and each thread
361 * is controlled by a single entry.
363 * Entries are distributed over a set of buckets. To avoid using modulo
364 * arithmetic the number of buckets is 2^n and is determined as the number of
365 * CPUs in the system rounded down to the nearest power of two. The tunable
366 * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each entry
367 * is attached to a bucket for its lifetime and can't migrate to other buckets.
369 * Entries that have scheduled tasks are not placed in any list. The dispatch
370 * function sets their "func" and "arg" fields and signals the corresponding
371 * thread to execute the task. Once the thread executes the task it clears the
372 * "func" field and places an entry on the bucket cache of free entries pointed
373 * by "tqbucket_freelist" field. ALL entries on the free list should have "func"
374 * field equal to NULL. The free list is a circular doubly-linked list identical
375 * in structure to the tq_task list above, but entries are taken from it in LIFO
376 * order - the last freed entry is the first to be allocated. The
377 * taskq_bucket_dispatch() function gets the most recently used entry from the
378 * free list, sets its "func" and "arg" fields and signals a worker thread.
380 * After executing each task a per-entry thread taskq_d_thread() places its
381 * entry on the bucket free list and goes to a timed sleep. If it wakes up
382 * without getting a new task it removes the entry from the free list and destroys
383 * itself. The thread sleep time is controlled by a tunable variable
384 * `taskq_thread_timeout'.
386 * There are various statistics kept in the bucket which allow for later
387 * analysis of taskq usage patterns. Also, a global copy of taskq creation and
388 * death statistics is kept in the global taskq data structure. Since thread
389 * creation and death happen rarely, updating such global data does not present
390 * a performance problem.
392 * NOTE: Threads are not bound to any CPU and there is absolutely no association
393 * between the bucket and actual thread CPU, so buckets are used only to
394 * split resources and reduce resource contention. Having threads attached
395 * to the CPU denoted by a bucket may reduce the number of times the job
396 * switches between CPUs.
398 * The current algorithm creates a thread whenever a bucket has no free
399 * entries. It would be nice to know how many threads are in the running
400 * state and not create threads if all CPUs are busy with existing
401 * tasks, but it is unclear how such a strategy can be implemented.
403 * Currently buckets are created statically as an array attached to the task
404 * queue. On some systems with nCPUs < max_ncpus this may waste system
405 * memory. One solution may be allocation of buckets when they are first
406 * touched, but it is not clear how useful it is.
408 * SUSPEND/RESUME implementation -----------------------------------------------
410 * Before executing a task taskq_thread() (executing non-dynamic task
411 * queues) obtains taskq's thread lock as a reader. The taskq_suspend()
412 * function gets the same lock as a writer blocking all non-dynamic task
413 * execution. The taskq_resume() function releases the lock allowing
414 * taskq_thread to continue execution.
416 * For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
417 * taskq_suspend() function. After that taskq_bucket_dispatch() always
418 * fails, so that taskq_dispatch() will either enqueue tasks for a
419 * suspended backing queue or fail if TQ_NOQUEUE is specified in dispatch
420 * flags.
422 * NOTE: taskq_suspend() does not immediately block any tasks already
423 * scheduled for dynamic task queues. It only suspends new tasks
424 * scheduled after taskq_suspend() was called.
426 * The taskq_member() function works by comparing a thread's t_taskq pointer
427 * with the passed taskq pointer.
429 * LOCKS and LOCK Hierarchy ----------------------------------------------------
431 * There are three locks used in task queues:
433 * 1) The taskq_t's tq_lock, protecting global task queue state.
435 * 2) Each per-CPU bucket has a lock for bucket management.
437 * 3) The global taskq_cpupct_lock, which protects the list of
438 * TASKQ_THREADS_CPU_PCT taskqs.
440 * If both (1) and (2) are needed, tq_lock should be taken *after* the bucket
441 * lock.
443 * If both (1) and (3) are needed, tq_lock should be taken *after*
444 * taskq_cpupct_lock.
446 * DEBUG FACILITIES ------------------------------------------------------------
448 * For DEBUG kernels it is possible to induce random failures in the
449 * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The values of
450 * the taskq_dmtbf and taskq_smtbf tunables control the mean time between induced
451 * failures for dynamic and static task queues respectively.
453 * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
455 * TUNABLES --------------------------------------------------------------------
457 * system_taskq_size - Size of the global system_taskq.
458 * This value is multiplied by nCPUs to determine
459 * actual size.
460 * Default value: 64
462 * taskq_minimum_nthreads_max
463 * - Minimum size of the thread list for a taskq.
464 * Useful for testing different thread pool
465 * sizes by overwriting tq_nthreads_target.
467 * taskq_thread_timeout - Maximum idle time for taskq_d_thread()
468 * Default value: 5 minutes
470 * taskq_maxbuckets - Maximum number of buckets in any task queue
471 * Default value: 128
473 * taskq_search_depth - Maximum # of buckets searched for a free entry
474 * Default value: 4
476 * taskq_dmtbf - Mean time between induced dispatch failures
477 * for dynamic task queues.
478 * Default value: UINT_MAX (no induced failures)
480 * taskq_smtbf - Mean time between induced dispatch failures
481 * for static task queues.
482 * Default value: UINT_MAX (no induced failures)
484 * CONDITIONAL compilation -----------------------------------------------------
486 * TASKQ_STATISTIC - If set, enables per-bucket statistics (the default).
490 #include <sys/taskq_impl.h>
491 #include <sys/thread.h>
492 #include <sys/proc.h>
493 #include <sys/kmem.h>
494 #include <sys/vmem.h>
495 #include <sys/callb.h>
496 #include <sys/class.h>
497 #include <sys/systm.h>
498 #include <sys/cmn_err.h>
499 #include <sys/debug.h>
500 #include <sys/vmsystm.h> /* For throttlefree */
501 #include <sys/sysmacros.h>
502 #include <sys/cpuvar.h>
503 #include <sys/cpupart.h>
504 #include <sys/sdt.h>
505 #include <sys/sysdc.h>
506 #include <sys/note.h>
508 static kmem_cache_t *taskq_ent_cache, *taskq_cache;
511 * Pseudo instance numbers for taskqs without explicitly provided instance.
513 static vmem_t *taskq_id_arena;
515 /* Global system task queue for common use */
516 taskq_t *system_taskq;
519 * Maximum number of entries in global system taskq is
520 * system_taskq_size * max_ncpus
522 #define SYSTEM_TASKQ_SIZE 64
523 int system_taskq_size = SYSTEM_TASKQ_SIZE;
526 * Minimum size for tq_nthreads_max; useful for those who want to play around
527 * with increasing a taskq's tq_nthreads_target.
529 int taskq_minimum_nthreads_max = 1;
532 * We want to ensure that when taskq_create() returns, there is at least
533 * one thread ready to handle requests. To guarantee this, we have to wait
534 * for the second thread, since the first one cannot process requests until
535 * the second thread has been created.
537 #define TASKQ_CREATE_ACTIVE_THREADS 2
539 /* Maximum percentage allowed for TASKQ_THREADS_CPU_PCT */
540 #define TASKQ_CPUPCT_MAX_PERCENT 1000
541 int taskq_cpupct_max_percent = TASKQ_CPUPCT_MAX_PERCENT;
544 * Dynamic task queue threads that don't get any work within
545 * taskq_thread_timeout destroy themselves
547 #define TASKQ_THREAD_TIMEOUT (60 * 5)
548 int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;
550 #define TASKQ_MAXBUCKETS 128
551 int taskq_maxbuckets = TASKQ_MAXBUCKETS;
554 * When a bucket has no available entries other buckets are tried.
555 * The taskq_search_depth parameter limits the number of buckets that we search
556 * before failing. This is mostly useful in systems with many CPUs where we may
557 * spend too much time scanning busy buckets.
559 #define TASKQ_SEARCH_DEPTH 4
560 int taskq_search_depth = TASKQ_SEARCH_DEPTH;
563 * Hashing function: mix various bits of x. May be pretty much anything.
565 #define TQ_HASH(x) ((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))
568 * We do not create any new threads when the system is low on memory and starts
569 * throttling memory allocations. The following macro tries to estimate such a
570 * condition.
572 #define ENOUGH_MEMORY() (freemem > throttlefree)
575 * Static functions.
577 static taskq_t *taskq_create_common(const char *, int, int, pri_t, int,
578 int, proc_t *, uint_t, uint_t);
579 static void taskq_thread(void *);
580 static void taskq_d_thread(taskq_ent_t *);
581 static void taskq_bucket_extend(void *);
582 static int taskq_constructor(void *, void *, int);
583 static void taskq_destructor(void *, void *);
584 static int taskq_ent_constructor(void *, void *, int);
585 static void taskq_ent_destructor(void *, void *);
586 static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
587 static void taskq_ent_free(taskq_t *, taskq_ent_t *);
588 static int taskq_ent_exists(taskq_t *, task_func_t, void *);
589 static taskq_ent_t *taskq_bucket_dispatch(taskq_bucket_t *, task_func_t,
590 void *);
593 * Task queues kstats.
595 struct taskq_kstat {
596 kstat_named_t tq_pid;
597 kstat_named_t tq_tasks;
598 kstat_named_t tq_executed;
599 kstat_named_t tq_maxtasks;
600 kstat_named_t tq_totaltime;
601 kstat_named_t tq_nalloc;
602 kstat_named_t tq_nactive;
603 kstat_named_t tq_pri;
604 kstat_named_t tq_nthreads;
605 kstat_named_t tq_nomem;
606 } taskq_kstat = {
607 { "pid", KSTAT_DATA_UINT64 },
608 { "tasks", KSTAT_DATA_UINT64 },
609 { "executed", KSTAT_DATA_UINT64 },
610 { "maxtasks", KSTAT_DATA_UINT64 },
611 { "totaltime", KSTAT_DATA_UINT64 },
612 { "nalloc", KSTAT_DATA_UINT64 },
613 { "nactive", KSTAT_DATA_UINT64 },
614 { "priority", KSTAT_DATA_UINT64 },
615 { "threads", KSTAT_DATA_UINT64 },
616 { "nomem", KSTAT_DATA_UINT64 },
619 struct taskq_d_kstat {
620 kstat_named_t tqd_pri;
621 kstat_named_t tqd_btasks;
622 kstat_named_t tqd_bexecuted;
623 kstat_named_t tqd_bmaxtasks;
624 kstat_named_t tqd_bnalloc;
625 kstat_named_t tqd_bnactive;
626 kstat_named_t tqd_btotaltime;
627 kstat_named_t tqd_hits;
628 kstat_named_t tqd_misses;
629 kstat_named_t tqd_overflows;
630 kstat_named_t tqd_tcreates;
631 kstat_named_t tqd_tdeaths;
632 kstat_named_t tqd_maxthreads;
633 kstat_named_t tqd_nomem;
634 kstat_named_t tqd_disptcreates;
635 kstat_named_t tqd_totaltime;
636 kstat_named_t tqd_nalloc;
637 kstat_named_t tqd_nfree;
638 } taskq_d_kstat = {
639 { "priority", KSTAT_DATA_UINT64 },
640 { "btasks", KSTAT_DATA_UINT64 },
641 { "bexecuted", KSTAT_DATA_UINT64 },
642 { "bmaxtasks", KSTAT_DATA_UINT64 },
643 { "bnalloc", KSTAT_DATA_UINT64 },
644 { "bnactive", KSTAT_DATA_UINT64 },
645 { "btotaltime", KSTAT_DATA_UINT64 },
646 { "hits", KSTAT_DATA_UINT64 },
647 { "misses", KSTAT_DATA_UINT64 },
648 { "overflows", KSTAT_DATA_UINT64 },
649 { "tcreates", KSTAT_DATA_UINT64 },
650 { "tdeaths", KSTAT_DATA_UINT64 },
651 { "maxthreads", KSTAT_DATA_UINT64 },
652 { "nomem", KSTAT_DATA_UINT64 },
653 { "disptcreates", KSTAT_DATA_UINT64 },
654 { "totaltime", KSTAT_DATA_UINT64 },
655 { "nalloc", KSTAT_DATA_UINT64 },
656 { "nfree", KSTAT_DATA_UINT64 },
659 static kmutex_t taskq_kstat_lock;
660 static kmutex_t taskq_d_kstat_lock;
661 static int taskq_kstat_update(kstat_t *, int);
662 static int taskq_d_kstat_update(kstat_t *, int);
665 * List of all TASKQ_THREADS_CPU_PCT taskqs.
667 static list_t taskq_cpupct_list; /* protected by cpu_lock */
670 * Collect per-bucket statistics when TASKQ_STATISTIC is defined.
672 #define TASKQ_STATISTIC 1
674 #if TASKQ_STATISTIC
675 #define TQ_STAT(b, x) b->tqbucket_stat.x++
676 #else
677 #define TQ_STAT(b, x)
678 #endif
681 * Random fault injection.
683 uint_t taskq_random;
684 uint_t taskq_dmtbf = UINT_MAX; /* mean time between injected failures */
685 uint_t taskq_smtbf = UINT_MAX; /* mean time between injected failures */
688 * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
690 * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
691 * they could prepopulate the cache and make sure that they do not use more
692 * than minalloc entries. So, fault injection in this case ensures that
693 * either TASKQ_PREPOPULATE is not set or there are more entries allocated
694 * than is specified by minalloc. TQ_NOALLOC dispatches are always allowed
695 * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
696 * dispatches.
698 #ifdef DEBUG
699 #define TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag) \
700 taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
701 if ((flag & TQ_NOSLEEP) && \
702 taskq_random < 1771875 / taskq_dmtbf) { \
703 return ((uintptr_t)NULL); \
706 #define TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag) \
707 taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
708 if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) && \
709 (!(tq->tq_flags & TASKQ_PREPOPULATE) || \
710 (tq->tq_nalloc > tq->tq_minalloc)) && \
711 (taskq_random < (1771875 / taskq_smtbf))) { \
712 mutex_exit(&tq->tq_lock); \
713 return ((uintptr_t)NULL); \
715 #else
716 #define TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
717 #define TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
718 #endif
720 #define IS_EMPTY(l) (((l).tqent_prev == (l).tqent_next) && \
721 ((l).tqent_prev == &(l)))
724 * Append `tqe' at the end of the doubly-linked list denoted by l.
726 #define TQ_APPEND(l, tqe) { \
727 tqe->tqent_next = &l; \
728 tqe->tqent_prev = l.tqent_prev; \
729 tqe->tqent_next->tqent_prev = tqe; \
730 tqe->tqent_prev->tqent_next = tqe; \
733 * Prepend 'tqe' to the beginning of l
735 #define TQ_PREPEND(l, tqe) { \
736 tqe->tqent_next = l.tqent_next; \
737 tqe->tqent_prev = &l; \
738 tqe->tqent_next->tqent_prev = tqe; \
739 tqe->tqent_prev->tqent_next = tqe; \
743 * Schedule a task specified by func and arg into the task queue entry tqe.
745 #define TQ_DO_ENQUEUE(tq, tqe, func, arg, front) { \
746 ASSERT(MUTEX_HELD(&tq->tq_lock)); \
747 _NOTE(CONSTCOND) \
748 if (front) { \
749 TQ_PREPEND(tq->tq_task, tqe); \
750 } else { \
751 TQ_APPEND(tq->tq_task, tqe); \
753 tqe->tqent_func = (func); \
754 tqe->tqent_arg = (arg); \
755 tq->tq_tasks++; \
756 if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks) \
757 tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed; \
758 cv_signal(&tq->tq_dispatch_cv); \
759 DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
762 #define TQ_ENQUEUE(tq, tqe, func, arg) \
763 TQ_DO_ENQUEUE(tq, tqe, func, arg, 0)
765 #define TQ_ENQUEUE_FRONT(tq, tqe, func, arg) \
766 TQ_DO_ENQUEUE(tq, tqe, func, arg, 1)
769 * Do-nothing task which may be used to prepopulate thread caches.
771 /*ARGSUSED*/
772 void
773 nulltask(void *unused)
777 /*ARGSUSED*/
778 static int
779 taskq_constructor(void *buf, void *cdrarg, int kmflags)
781 taskq_t *tq = buf;
783 bzero(tq, sizeof (taskq_t));
785 mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
786 rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
787 cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
788 cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
789 cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
790 cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
792 tq->tq_task.tqent_next = &tq->tq_task;
793 tq->tq_task.tqent_prev = &tq->tq_task;
795 return (0);
798 /*ARGSUSED*/
799 static void
800 taskq_destructor(void *buf, void *cdrarg)
802 taskq_t *tq = buf;
804 ASSERT(tq->tq_nthreads == 0);
805 ASSERT(tq->tq_buckets == NULL);
806 ASSERT(tq->tq_tcreates == 0);
807 ASSERT(tq->tq_tdeaths == 0);
809 mutex_destroy(&tq->tq_lock);
810 rw_destroy(&tq->tq_threadlock);
811 cv_destroy(&tq->tq_dispatch_cv);
812 cv_destroy(&tq->tq_exit_cv);
813 cv_destroy(&tq->tq_wait_cv);
814 cv_destroy(&tq->tq_maxalloc_cv);
817 /*ARGSUSED*/
818 static int
819 taskq_ent_constructor(void *buf, void *cdrarg, int kmflags)
821 taskq_ent_t *tqe = buf;
823 tqe->tqent_thread = NULL;
824 cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);
826 return (0);
829 /*ARGSUSED*/
830 static void
831 taskq_ent_destructor(void *buf, void *cdrarg)
833 taskq_ent_t *tqe = buf;
835 ASSERT(tqe->tqent_thread == NULL);
836 cv_destroy(&tqe->tqent_cv);
839 void
840 taskq_init(void)
842 taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
843 sizeof (taskq_ent_t), 0, taskq_ent_constructor,
844 taskq_ent_destructor, NULL, NULL, NULL, 0);
845 taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
846 0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
847 taskq_id_arena = vmem_create("taskq_id_arena",
848 (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0,
849 VM_SLEEP | VMC_IDENTIFIER);
851 list_create(&taskq_cpupct_list, sizeof (taskq_t),
852 offsetof(taskq_t, tq_cpupct_link));
855 static void
856 taskq_update_nthreads(taskq_t *tq, uint_t ncpus)
858 uint_t newtarget = TASKQ_THREADS_PCT(ncpus, tq->tq_threads_ncpus_pct);
860 ASSERT(MUTEX_HELD(&cpu_lock));
861 ASSERT(MUTEX_HELD(&tq->tq_lock));
863 /* We must be going from non-zero to non-zero; no exiting. */
864 ASSERT3U(tq->tq_nthreads_target, !=, 0);
865 ASSERT3U(newtarget, !=, 0);
867 ASSERT3U(newtarget, <=, tq->tq_nthreads_max);
868 if (newtarget != tq->tq_nthreads_target) {
869 tq->tq_flags |= TASKQ_CHANGING;
870 tq->tq_nthreads_target = newtarget;
871 cv_broadcast(&tq->tq_dispatch_cv);
872 cv_broadcast(&tq->tq_exit_cv);
876 /* called during task queue creation */
877 static void
878 taskq_cpupct_install(taskq_t *tq, cpupart_t *cpup)
880 ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
882 mutex_enter(&cpu_lock);
883 mutex_enter(&tq->tq_lock);
884 tq->tq_cpupart = cpup->cp_id;
885 taskq_update_nthreads(tq, cpup->cp_ncpus);
886 mutex_exit(&tq->tq_lock);
888 list_insert_tail(&taskq_cpupct_list, tq);
889 mutex_exit(&cpu_lock);
892 static void
893 taskq_cpupct_remove(taskq_t *tq)
895 ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
897 mutex_enter(&cpu_lock);
898 list_remove(&taskq_cpupct_list, tq);
899 mutex_exit(&cpu_lock);
902 /*ARGSUSED*/
903 static int
904 taskq_cpu_setup(cpu_setup_t what, int id, void *arg)
906 taskq_t *tq;
907 cpupart_t *cp = cpu[id]->cpu_part;
908 uint_t ncpus = cp->cp_ncpus;
910 ASSERT(MUTEX_HELD(&cpu_lock));
911 ASSERT(ncpus > 0);
913 switch (what) {
914 case CPU_OFF:
915 case CPU_CPUPART_OUT:
916 /* offlines are called *before* the cpu is offlined. */
917 if (ncpus > 1)
918 ncpus--;
919 break;
921 case CPU_ON:
922 case CPU_CPUPART_IN:
923 break;
925 default:
926 return (0); /* doesn't affect cpu count */
929 for (tq = list_head(&taskq_cpupct_list); tq != NULL;
930 tq = list_next(&taskq_cpupct_list, tq)) {
932 mutex_enter(&tq->tq_lock);
934 * If the taskq is part of the cpuset which is changing,
935 * update its nthreads_target.
937 if (tq->tq_cpupart == cp->cp_id) {
938 taskq_update_nthreads(tq, ncpus);
940 mutex_exit(&tq->tq_lock);
942 return (0);
945 void
946 taskq_mp_init(void)
948 mutex_enter(&cpu_lock);
949 register_cpu_setup_func(taskq_cpu_setup, NULL);
951 * Make sure we're up to date. At this point in boot, there is only
952 * one processor set, so we only have to update the current CPU.
954 (void) taskq_cpu_setup(CPU_ON, CPU->cpu_id, NULL);
955 mutex_exit(&cpu_lock);
959 * Create global system dynamic task queue.
961 void
962 system_taskq_init(void)
964 system_taskq = taskq_create_common("system_taskq", 0,
965 system_taskq_size * max_ncpus, minclsyspri, 4, 512, &p0, 0,
966 TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
970 * taskq_ent_alloc()
972 * Allocates a new taskq_ent_t structure either from the free list or from the
973 * cache. Returns NULL if it can't be allocated.
975 * Assumes: tq->tq_lock is held.
977 static taskq_ent_t *
978 taskq_ent_alloc(taskq_t *tq, int flags)
980 int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
981 taskq_ent_t *tqe;
982 clock_t wait_time;
983 clock_t wait_rv;
985 ASSERT(MUTEX_HELD(&tq->tq_lock));
988 * TQ_NOALLOC allocations are allowed to use the freelist, even if
989 * we are below tq_minalloc.
991 again: if ((tqe = tq->tq_freelist) != NULL &&
992 ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
993 tq->tq_freelist = tqe->tqent_next;
994 } else {
995 if (flags & TQ_NOALLOC)
996 return (NULL);
998 if (tq->tq_nalloc >= tq->tq_maxalloc) {
999 if (kmflags & KM_NOSLEEP)
1000 return (NULL);
1003 * We don't want to exceed tq_maxalloc, but we can't
1004 * wait for other tasks to complete (and thus free up
1005 * task structures) without risking deadlock with
1006 * the caller. So, we just delay for one second
1007 * to throttle the allocation rate. If we have tasks
1008 * complete before one second timeout expires then
1009 * taskq_ent_free will signal us and we will
1010 * immediately retry the allocation (reap free).
1012 wait_time = ddi_get_lbolt() + hz;
1013 while (tq->tq_freelist == NULL) {
1014 tq->tq_maxalloc_wait++;
1015 wait_rv = cv_timedwait(&tq->tq_maxalloc_cv,
1016 &tq->tq_lock, wait_time);
1017 tq->tq_maxalloc_wait--;
1018 if (wait_rv == -1)
1019 break;
1021 if (tq->tq_freelist)
1022 goto again; /* reap freelist */
1025 mutex_exit(&tq->tq_lock);
1027 tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);
1029 mutex_enter(&tq->tq_lock);
1030 if (tqe != NULL)
1031 tq->tq_nalloc++;
1033 return (tqe);
1037 * taskq_ent_free()
1039 * Free taskq_ent_t structure by either putting it on the free list or freeing
1040 * it to the cache.
1042 * Assumes: tq->tq_lock is held.
1044 static void
1045 taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
1047 ASSERT(MUTEX_HELD(&tq->tq_lock));
1049 if (tq->tq_nalloc <= tq->tq_minalloc) {
1050 tqe->tqent_next = tq->tq_freelist;
1051 tq->tq_freelist = tqe;
1052 } else {
1053 tq->tq_nalloc--;
1054 mutex_exit(&tq->tq_lock);
1055 kmem_cache_free(taskq_ent_cache, tqe);
1056 mutex_enter(&tq->tq_lock);
1059 if (tq->tq_maxalloc_wait)
1060 cv_signal(&tq->tq_maxalloc_cv);
1064 * taskq_ent_exists()
1066 * Return 1 if the taskq already has an entry for calling 'func(arg)'.
1068 * Assumes: tq->tq_lock is held.
1070 static int
1071 taskq_ent_exists(taskq_t *tq, task_func_t func, void *arg)
1073 taskq_ent_t *tqe;
1075 ASSERT(MUTEX_HELD(&tq->tq_lock));
1077 for (tqe = tq->tq_task.tqent_next; tqe != &tq->tq_task;
1078 tqe = tqe->tqent_next)
1079 if ((tqe->tqent_func == func) && (tqe->tqent_arg == arg))
1080 return (1);
1081 return (0);
1085 * Dispatch a task "func(arg)" to a free entry of bucket b.
1087 * Assumes: no bucket locks are held.
1089 * Returns: a pointer to an entry if dispatch was successful.
1090 * NULL if there are no free entries or if the bucket is suspended.
1092 static taskq_ent_t *
1093 taskq_bucket_dispatch(taskq_bucket_t *b, task_func_t func, void *arg)
1095 taskq_ent_t *tqe;
1097 ASSERT(MUTEX_NOT_HELD(&b->tqbucket_lock));
1098 ASSERT(func != NULL);
1100 mutex_enter(&b->tqbucket_lock);
1102 ASSERT(b->tqbucket_nfree != 0 || IS_EMPTY(b->tqbucket_freelist));
1103 ASSERT(b->tqbucket_nfree == 0 || !IS_EMPTY(b->tqbucket_freelist));
1106 * Get an entry from the freelist if there is one.
1107 * Schedule task into the entry.
1109 if ((b->tqbucket_nfree != 0) &&
1110 !(b->tqbucket_flags & TQBUCKET_SUSPEND)) {
1111 tqe = b->tqbucket_freelist.tqent_prev;
1113 ASSERT(tqe != &b->tqbucket_freelist);
1114 ASSERT(tqe->tqent_thread != NULL);
1116 tqe->tqent_prev->tqent_next = tqe->tqent_next;
1117 tqe->tqent_next->tqent_prev = tqe->tqent_prev;
1118 b->tqbucket_nalloc++;
1119 b->tqbucket_nfree--;
1120 tqe->tqent_func = func;
1121 tqe->tqent_arg = arg;
1122 TQ_STAT(b, tqs_hits);
1123 cv_signal(&tqe->tqent_cv);
1124 DTRACE_PROBE2(taskq__d__enqueue, taskq_bucket_t *, b,
1125 taskq_ent_t *, tqe);
1126 } else {
1127 tqe = NULL;
1128 TQ_STAT(b, tqs_misses);
1130 mutex_exit(&b->tqbucket_lock);
1131 return (tqe);
1135 * Dispatch a task.
1137 * Assumes: func != NULL
1139 * Returns: NULL if dispatch failed.
1140 * non-NULL if task dispatched successfully.
1141 * Actual return value is the pointer to taskq entry that was used to
1142 * dispatch a task. This is useful for debugging.
1144 taskqid_t
1145 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
1147 taskq_bucket_t *bucket = NULL; /* Which bucket needs extension */
1148 taskq_ent_t *tqe = NULL;
1149 taskq_ent_t *tqe1;
1150 uint_t bsize;
1152 ASSERT(tq != NULL);
1153 ASSERT(func != NULL);
1155 if (!(tq->tq_flags & TASKQ_DYNAMIC)) {
1157 * TQ_NOQUEUE flag can't be used with non-dynamic task queues.
1159 ASSERT(!(flags & TQ_NOQUEUE));
1161 * Enqueue the task to the underlying queue.
1163 mutex_enter(&tq->tq_lock);
1165 TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);
1167 if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
1168 tq->tq_nomem++;
1169 mutex_exit(&tq->tq_lock);
1170 return ((uintptr_t)NULL);
1172 /* Make sure we start without any flags */
1173 tqe->tqent_un.tqent_flags = 0;
1175 if (flags & TQ_FRONT) {
1176 TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1177 } else {
1178 TQ_ENQUEUE(tq, tqe, func, arg);
1180 mutex_exit(&tq->tq_lock);
1181 return ((taskqid_t)tqe);
1185 * Dynamic taskq dispatching.
1187 ASSERT(!(flags & (TQ_NOALLOC | TQ_FRONT)));
1188 TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flags);
1190 bsize = tq->tq_nbuckets;
1192 if (bsize == 1) {
1194 * In a single-CPU case there is only one bucket, so get
1195 * entry directly from there.
1197 if ((tqe = taskq_bucket_dispatch(tq->tq_buckets, func, arg))
1198 != NULL)
1199 return ((taskqid_t)tqe); /* Fastpath */
1200 bucket = tq->tq_buckets;
1201 } else {
1202 int loopcount;
1203 taskq_bucket_t *b;
1204 uintptr_t h = ((uintptr_t)CPU + (uintptr_t)arg) >> 3;
1206 h = TQ_HASH(h);
1209 * The 'bucket' points to the original bucket that we hit. If we
1210 * can't allocate from it, we search other buckets, but only
1211 * extend this one.
1213 b = &tq->tq_buckets[h & (bsize - 1)];
1214 ASSERT(b->tqbucket_taskq == tq); /* Sanity check */
1217 * Do a quick check before grabbing the lock. If the bucket does
1218 * not have free entries now, chances are very small that it
1219 * will after we take the lock, so we just skip it.
1221 if (b->tqbucket_nfree != 0) {
1222 if ((tqe = taskq_bucket_dispatch(b, func, arg)) != NULL)
1223 return ((taskqid_t)tqe); /* Fastpath */
1224 } else {
1225 TQ_STAT(b, tqs_misses);
1228 bucket = b;
1229 loopcount = MIN(taskq_search_depth, bsize);
1231 * If bucket dispatch failed, search loopcount number of buckets
1232 * before we give up and fail.
1234 do {
1235 b = &tq->tq_buckets[++h & (bsize - 1)];
1236 ASSERT(b->tqbucket_taskq == tq); /* Sanity check */
1237 loopcount--;
1239 if (b->tqbucket_nfree != 0) {
1240 tqe = taskq_bucket_dispatch(b, func, arg);
1241 } else {
1242 TQ_STAT(b, tqs_misses);
1244 } while ((tqe == NULL) && (loopcount > 0));
1248 * At this point we either scheduled a task and (tqe != NULL) or failed
1249 * (tqe == NULL). Try to recover from fails.
1253 * For TQ_SLEEP dispatches, try to extend the bucket and retry dispatch.
1255 if ((tqe == NULL) && !(flags & TQ_NOSLEEP)) {
1257 * taskq_bucket_extend() may fail to do anything, but this is
1258 * fine - we deal with it later. If the bucket was successfully
1259 * extended, there is a good chance that taskq_bucket_dispatch()
1260 * will get this new entry, unless someone is racing with us and
1261 * stealing the new entry from under our nose.
1262 * taskq_bucket_extend() may sleep.
1264 taskq_bucket_extend(bucket);
1265 TQ_STAT(bucket, tqs_disptcreates);
1266 if ((tqe = taskq_bucket_dispatch(bucket, func, arg)) != NULL)
1267 return ((taskqid_t)tqe);
1270 ASSERT(bucket != NULL);
1273 * Since there are not enough free entries in the bucket, add a
1274 * taskq entry to extend it in the background using the backing queue
1275 * (unless we already have a taskq entry to perform that extension).
1277 mutex_enter(&tq->tq_lock);
1278 if (!taskq_ent_exists(tq, taskq_bucket_extend, bucket)) {
1279 if ((tqe1 = taskq_ent_alloc(tq, TQ_NOSLEEP)) != NULL) {
1280 TQ_ENQUEUE_FRONT(tq, tqe1, taskq_bucket_extend, bucket);
1281 } else {
1282 tq->tq_nomem++;
1287 * Dispatch failed and we can't find an entry to schedule a task.
1288 * Revert to the backing queue unless TQ_NOQUEUE was requested.
1290 if ((tqe == NULL) && !(flags & TQ_NOQUEUE)) {
1291 if ((tqe = taskq_ent_alloc(tq, flags)) != NULL) {
1292 TQ_ENQUEUE(tq, tqe, func, arg);
1293 } else {
1294 tq->tq_nomem++;
1297 mutex_exit(&tq->tq_lock);
1299 return ((taskqid_t)tqe);
1302 void
1303 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
1304 taskq_ent_t *tqe)
1306 ASSERT(func != NULL);
1307 ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
1310 * Mark it as a prealloc'd task. This is important
1311 * to ensure that we don't free it later.
1313 tqe->tqent_un.tqent_flags |= TQENT_FLAG_PREALLOC;
1315 * Enqueue the task to the underlying queue.
1317 mutex_enter(&tq->tq_lock);
1319 if (flags & TQ_FRONT) {
1320 TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1321 } else {
1322 TQ_ENQUEUE(tq, tqe, func, arg);
1324 mutex_exit(&tq->tq_lock);
1328 * Allow our caller to ask if there are tasks pending on the queue.
1330 boolean_t
1331 taskq_empty(taskq_t *tq)
1333 boolean_t rv;
1335 ASSERT3P(tq, !=, curthread->t_taskq);
1336 mutex_enter(&tq->tq_lock);
1337 rv = (tq->tq_task.tqent_next == &tq->tq_task) && (tq->tq_active == 0);
1338 mutex_exit(&tq->tq_lock);
1340 return (rv);
1344 * Wait for all pending tasks to complete.
1345 * Calling taskq_wait from a task will cause deadlock.
1347 void
1348 taskq_wait(taskq_t *tq)
1350 ASSERT(tq != curthread->t_taskq);
1352 mutex_enter(&tq->tq_lock);
1353 while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
1354 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1355 mutex_exit(&tq->tq_lock);
1357 if (tq->tq_flags & TASKQ_DYNAMIC) {
1358 taskq_bucket_t *b = tq->tq_buckets;
1359 int bid = 0;
1360 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1361 mutex_enter(&b->tqbucket_lock);
1362 while (b->tqbucket_nalloc > 0)
1363 cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
1364 mutex_exit(&b->tqbucket_lock);
1370 * Suspend execution of tasks.
1372 * Tasks in the queue part will be suspended immediately upon return from this
1373 * function. Pending tasks in the dynamic part will continue to execute, but all
1374 * new tasks will be suspended.
1376 void
1377 taskq_suspend(taskq_t *tq)
1379 rw_enter(&tq->tq_threadlock, RW_WRITER);
1381 if (tq->tq_flags & TASKQ_DYNAMIC) {
1382 taskq_bucket_t *b = tq->tq_buckets;
1383 int bid = 0;
1384 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1385 mutex_enter(&b->tqbucket_lock);
1386 b->tqbucket_flags |= TQBUCKET_SUSPEND;
1387 mutex_exit(&b->tqbucket_lock);
1391 * Mark task queue as being suspended. Needed for taskq_suspended().
1393 mutex_enter(&tq->tq_lock);
1394 ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
1395 tq->tq_flags |= TASKQ_SUSPENDED;
1396 mutex_exit(&tq->tq_lock);
1400 * returns: 1 if tq is suspended, 0 otherwise.
1403 taskq_suspended(taskq_t *tq)
1405 return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
1409 * Resume taskq execution.
1411 void
1412 taskq_resume(taskq_t *tq)
1414 ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));
1416 if (tq->tq_flags & TASKQ_DYNAMIC) {
1417 taskq_bucket_t *b = tq->tq_buckets;
1418 int bid = 0;
1419 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1420 mutex_enter(&b->tqbucket_lock);
1421 b->tqbucket_flags &= ~TQBUCKET_SUSPEND;
1422 mutex_exit(&b->tqbucket_lock);
1425 mutex_enter(&tq->tq_lock);
1426 ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
1427 tq->tq_flags &= ~TASKQ_SUSPENDED;
1428 mutex_exit(&tq->tq_lock);
1430 rw_exit(&tq->tq_threadlock);
1434 taskq_member(taskq_t *tq, kthread_t *thread)
1436 return (thread->t_taskq == tq);
1440 * Creates a thread in the taskq. We only allow one outstanding create at
1441 * a time. We drop and reacquire the tq_lock in order to avoid blocking other
1442 * taskq activity while thread_create() or lwp_kernel_create() run.
1444 * The first time we're called, we do some additional setup, and do not
1445 * return until there are enough threads to start servicing requests.
1447 static void
1448 taskq_thread_create(taskq_t *tq)
1450 kthread_t *t;
1451 const boolean_t first = (tq->tq_nthreads == 0);
1453 ASSERT(MUTEX_HELD(&tq->tq_lock));
1454 ASSERT(tq->tq_flags & TASKQ_CHANGING);
1455 ASSERT(tq->tq_nthreads < tq->tq_nthreads_target);
1456 ASSERT(!(tq->tq_flags & TASKQ_THREAD_CREATED));
1459 tq->tq_flags |= TASKQ_THREAD_CREATED;
1460 tq->tq_active++;
1461 mutex_exit(&tq->tq_lock);
1464 * With TASKQ_DUTY_CYCLE the new thread must have an LWP
1465 * as explained in ../disp/sysdc.c (for the msacct data).
1466 * Otherwise simple kthreads are preferred.
1468 if ((tq->tq_flags & TASKQ_DUTY_CYCLE) != 0) {
1469 /* Enforced in taskq_create_common */
1470 ASSERT3P(tq->tq_proc, !=, &p0);
1471 t = lwp_kernel_create(tq->tq_proc, taskq_thread, tq, TS_RUN,
1472 tq->tq_pri);
1473 } else {
1474 t = thread_create(NULL, 0, taskq_thread, tq, 0, tq->tq_proc,
1475 TS_RUN, tq->tq_pri);
1478 if (!first) {
1479 mutex_enter(&tq->tq_lock);
1480 return;
1484 * We know the thread cannot go away, since tq cannot be
1485 * destroyed until creation has completed. We can therefore
1486 * safely dereference t.
1488 if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
1489 taskq_cpupct_install(tq, t->t_cpupart);
1491 mutex_enter(&tq->tq_lock);
1493 /* Wait until we can service requests. */
1494 while (tq->tq_nthreads != tq->tq_nthreads_target &&
1495 tq->tq_nthreads < TASKQ_CREATE_ACTIVE_THREADS) {
1496 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1501 * Common "sleep taskq thread" function, which handles CPR stuff, as well
1502 * as giving a nice common point for debuggers to find inactive threads.
1504 static clock_t
1505 taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv,
1506 callb_cpr_t *cprinfo, clock_t timeout)
1508 clock_t ret = 0;
1510 if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1511 CALLB_CPR_SAFE_BEGIN(cprinfo);
1513 if (timeout < 0)
1514 cv_wait(cv, mx);
1515 else
1516 ret = cv_reltimedwait(cv, mx, timeout, TR_CLOCK_TICK);
1518 if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1519 CALLB_CPR_SAFE_END(cprinfo, mx);
1522 return (ret);
1526 * Worker thread for processing task queue.
1528 static void
1529 taskq_thread(void *arg)
1531 int thread_id;
1533 taskq_t *tq = arg;
1534 taskq_ent_t *tqe;
1535 callb_cpr_t cprinfo;
1536 hrtime_t start, end;
1537 boolean_t freeit;
1539 curthread->t_taskq = tq; /* mark ourselves for taskq_member() */
1541 if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) {
1542 sysdc_thread_enter(curthread, tq->tq_DC,
1543 (tq->tq_flags & TASKQ_DC_BATCH) ? SYSDC_THREAD_BATCH : 0);
1546 if (tq->tq_flags & TASKQ_CPR_SAFE) {
1547 CALLB_CPR_INIT_SAFE(curthread, tq->tq_name);
1548 } else {
1549 CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr,
1550 tq->tq_name);
1552 mutex_enter(&tq->tq_lock);
1553 thread_id = ++tq->tq_nthreads;
1554 ASSERT(tq->tq_flags & TASKQ_THREAD_CREATED);
1555 ASSERT(tq->tq_flags & TASKQ_CHANGING);
1556 tq->tq_flags &= ~TASKQ_THREAD_CREATED;
1558 VERIFY3S(thread_id, <=, tq->tq_nthreads_max);
1560 if (tq->tq_nthreads_max == 1)
1561 tq->tq_thread = curthread;
1562 else
1563 tq->tq_threadlist[thread_id - 1] = curthread;
1565 /* Allow taskq_create_common()'s taskq_thread_create() to return. */
1566 if (tq->tq_nthreads == TASKQ_CREATE_ACTIVE_THREADS)
1567 cv_broadcast(&tq->tq_wait_cv);
1569 for (;;) {
1570 if (tq->tq_flags & TASKQ_CHANGING) {
1571 /* See if we're no longer needed */
1572 if (thread_id > tq->tq_nthreads_target) {
1574 * To preserve the one-to-one mapping between
1575 * thread_id and thread, we must exit from
1576 * highest thread ID to least.
1578 * However, if everyone is exiting, the order
1579 * doesn't matter, so just exit immediately.
1580 * (this is safe, since you must wait for
1581 * nthreads to reach 0 after setting
1582 * tq_nthreads_target to 0)
1584 if (thread_id == tq->tq_nthreads ||
1585 tq->tq_nthreads_target == 0)
1586 break;
1588 /* Wait for higher thread_ids to exit */
1589 (void) taskq_thread_wait(tq, &tq->tq_lock,
1590 &tq->tq_exit_cv, &cprinfo, -1);
1591 continue;
1595 * If no thread is starting taskq_thread(), we can
1596 * do some bookkeeping.
1598 if (!(tq->tq_flags & TASKQ_THREAD_CREATED)) {
1599 /* Check if we've reached our target */
1600 if (tq->tq_nthreads == tq->tq_nthreads_target) {
1601 tq->tq_flags &= ~TASKQ_CHANGING;
1602 cv_broadcast(&tq->tq_wait_cv);
1604 /* Check if we need to create a thread */
1605 if (tq->tq_nthreads < tq->tq_nthreads_target) {
1606 taskq_thread_create(tq);
1607 continue; /* tq_lock was dropped */
1611 if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
1612 if (--tq->tq_active == 0)
1613 cv_broadcast(&tq->tq_wait_cv);
1614 (void) taskq_thread_wait(tq, &tq->tq_lock,
1615 &tq->tq_dispatch_cv, &cprinfo, -1);
1616 tq->tq_active++;
1617 continue;
1620 tqe->tqent_prev->tqent_next = tqe->tqent_next;
1621 tqe->tqent_next->tqent_prev = tqe->tqent_prev;
1622 mutex_exit(&tq->tq_lock);
1625 * For prealloc'd tasks, we don't free anything. We
1626 * have to check this now, because once we call the
1627 * function for a prealloc'd taskq, we can't touch the
1628 * tqent any longer (calling the function returns the
1629 * ownership of the tqent back to the caller of
1630 * taskq_dispatch_ent().)
1632 if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
1633 (tqe->tqent_un.tqent_flags & TQENT_FLAG_PREALLOC)) {
1634 /* clear pointers to assist assertion checks */
1635 tqe->tqent_next = tqe->tqent_prev = NULL;
1636 freeit = B_FALSE;
1637 } else {
1638 freeit = B_TRUE;
1641 rw_enter(&tq->tq_threadlock, RW_READER);
1642 start = gethrtime();
1643 DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
1644 taskq_ent_t *, tqe);
1645 tqe->tqent_func(tqe->tqent_arg);
1646 DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
1647 taskq_ent_t *, tqe);
1648 end = gethrtime();
1649 rw_exit(&tq->tq_threadlock);
1651 mutex_enter(&tq->tq_lock);
1652 tq->tq_totaltime += end - start;
1653 tq->tq_executed++;
1655 if (freeit)
1656 taskq_ent_free(tq, tqe);
1659 if (tq->tq_nthreads_max == 1)
1660 tq->tq_thread = NULL;
1661 else
1662 tq->tq_threadlist[thread_id - 1] = NULL;
1664 /* We're exiting, and therefore no longer active */
1665 ASSERT(tq->tq_active > 0);
1666 tq->tq_active--;
1668 ASSERT(tq->tq_nthreads > 0);
1669 tq->tq_nthreads--;
1671 /* Wake up anyone waiting for us to exit */
1672 cv_broadcast(&tq->tq_exit_cv);
1673 if (tq->tq_nthreads == tq->tq_nthreads_target) {
1674 if (!(tq->tq_flags & TASKQ_THREAD_CREATED))
1675 tq->tq_flags &= ~TASKQ_CHANGING;
1677 cv_broadcast(&tq->tq_wait_cv);
1680 ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
1681 CALLB_CPR_EXIT(&cprinfo); /* drops tq->tq_lock */
1682 if (curthread->t_lwp != NULL) {
1683 mutex_enter(&curproc->p_lock);
1684 lwp_exit();
1685 } else {
1686 thread_exit();
1691 * Worker per-entry thread for dynamic dispatches.
1693 static void
1694 taskq_d_thread(taskq_ent_t *tqe)
1696 taskq_bucket_t *bucket = tqe->tqent_un.tqent_bucket;
1697 taskq_t *tq = bucket->tqbucket_taskq;
1698 kmutex_t *lock = &bucket->tqbucket_lock;
1699 kcondvar_t *cv = &tqe->tqent_cv;
1700 callb_cpr_t cprinfo;
1701 clock_t w;
1703 CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, tq->tq_name);
1705 mutex_enter(lock);
1707 for (;;) {
1709 * If a task is scheduled (func != NULL), execute it, otherwise
1710 * sleep, waiting for a job.
1712 if (tqe->tqent_func != NULL) {
1713 hrtime_t start;
1714 hrtime_t end;
1716 ASSERT(bucket->tqbucket_nalloc > 0);
1719 * It is possible to free the entry right away before
1720 * actually executing the task so that subsequent
1721 * dispatches may immediately reuse it. But this,
1722 * effectively, creates a queue of length two in the entry
1723 * and may lead to a deadlock if the execution of the
1724 * current task depends on the execution of the next
1725 * scheduled task. So, we keep the entry busy until the
1726 * task is processed.
1729 mutex_exit(lock);
1730 start = gethrtime();
1731 DTRACE_PROBE3(taskq__d__exec__start, taskq_t *, tq,
1732 taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
1733 tqe->tqent_func(tqe->tqent_arg);
1734 DTRACE_PROBE3(taskq__d__exec__end, taskq_t *, tq,
1735 taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
1736 end = gethrtime();
1737 mutex_enter(lock);
1738 bucket->tqbucket_totaltime += end - start;
1741 * Return the entry to the bucket free list.
1743 tqe->tqent_func = NULL;
1744 TQ_APPEND(bucket->tqbucket_freelist, tqe);
1745 bucket->tqbucket_nalloc--;
1746 bucket->tqbucket_nfree++;
1747 ASSERT(!IS_EMPTY(bucket->tqbucket_freelist));
1749 * taskq_wait() waits for nalloc to drop to zero on
1750 * tqbucket_cv.
1752 cv_signal(&bucket->tqbucket_cv);
1756 * At this point the entry must be in the bucket free list -
1757 * either because it was there initially or because it just
1758 * finished executing a task and put itself on the free list.
1760 ASSERT(bucket->tqbucket_nfree > 0);
1762 * Go to sleep unless we are closing.
1763 * If a thread is sleeping too long, it dies.
1765 if (! (bucket->tqbucket_flags & TQBUCKET_CLOSE)) {
1766 w = taskq_thread_wait(tq, lock, cv,
1767 &cprinfo, taskq_thread_timeout * hz);
1771 * At this point we may be in two different states:
1773 * (1) tqent_func is set which means that a new task is
1774 * dispatched and we need to execute it.
1776 * (2) Thread is sleeping for too long or we are closing. In
1777 * both cases destroy the thread and the entry.
1780 /* If func is NULL we should be on the freelist. */
1781 ASSERT((tqe->tqent_func != NULL) ||
1782 (bucket->tqbucket_nfree > 0));
1783 /* If func is non-NULL we should be allocated */
1784 ASSERT((tqe->tqent_func == NULL) ||
1785 (bucket->tqbucket_nalloc > 0));
1787 /* Check freelist consistency */
1788 ASSERT((bucket->tqbucket_nfree > 0) ||
1789 IS_EMPTY(bucket->tqbucket_freelist));
1790 ASSERT((bucket->tqbucket_nfree == 0) ||
1791 !IS_EMPTY(bucket->tqbucket_freelist));
1793 if ((tqe->tqent_func == NULL) &&
1794 ((w == -1) || (bucket->tqbucket_flags & TQBUCKET_CLOSE))) {
1796 * This thread is sleeping for too long or we are
1797 * closing - time to die.
1798 * Thread creation/destruction happens rarely,
1799 * so grabbing the lock is not a big performance issue.
1800 * The bucket lock is dropped by CALLB_CPR_EXIT().
1803 /* Remove the entry from the free list. */
1804 tqe->tqent_prev->tqent_next = tqe->tqent_next;
1805 tqe->tqent_next->tqent_prev = tqe->tqent_prev;
1806 ASSERT(bucket->tqbucket_nfree > 0);
1807 bucket->tqbucket_nfree--;
1809 TQ_STAT(bucket, tqs_tdeaths);
1810 cv_signal(&bucket->tqbucket_cv);
1811 tqe->tqent_thread = NULL;
1812 mutex_enter(&tq->tq_lock);
1813 tq->tq_tdeaths++;
1814 mutex_exit(&tq->tq_lock);
1815 CALLB_CPR_EXIT(&cprinfo);
1816 kmem_cache_free(taskq_ent_cache, tqe);
1817 thread_exit();
1824 * Taskq creation. May sleep for memory.
1825 * Always use automatically generated instances to avoid kstat name space
1826 * collisions.
1829 taskq_t *
1830 taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
1831 int maxalloc, uint_t flags)
1833 ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1835 return (taskq_create_common(name, 0, nthreads, pri, minalloc,
1836 maxalloc, &p0, 0, flags | TASKQ_NOINSTANCE));
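/*
 * A minimal usage sketch of the interface above (illustrative only:
 * example_task(), its argument and the numeric parameters are
 * hypothetical, not taken from this file):
 *
 *	tq = taskq_create("example_tq", 4, minclsyspri, 4, INT_MAX,
 *	    TASKQ_PREPOPULATE);
 *	(void) taskq_dispatch(tq, example_task, arg, TQ_SLEEP);
 *	taskq_wait(tq);
 *	taskq_destroy(tq);
 *
 * taskq_dispatch() returns 0 on failure, which callers must check when
 * dispatching with TQ_NOSLEEP; with TQ_SLEEP it may block for resources
 * instead.
 */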
1840 * Create an instance of a task queue. It is legal to create task queues
1841 * with the same name and different instances.
1843 * taskq_create_instance is used by ddi_taskq_create() where it gets the
1844 * instance from ddi_get_instance(). In some cases the instance is not
1845 * initialized and is set to -1. This case is handled as if no instance was
1846 * passed at all.
1848 taskq_t *
1849 taskq_create_instance(const char *name, int instance, int nthreads, pri_t pri,
1850 int minalloc, int maxalloc, uint_t flags)
1852 ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1853 ASSERT((instance >= 0) || (instance == -1));
1855 if (instance < 0) {
1856 flags |= TASKQ_NOINSTANCE;
1859 return (taskq_create_common(name, instance, nthreads,
1860 pri, minalloc, maxalloc, &p0, 0, flags));
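/*
 * Illustrative sketch of the instance handling described above ("dip"
 * and the parameter values are hypothetical): a ddi_taskq_create()-style
 * caller passes the driver instance, and an uninitialized instance of -1
 * simply degenerates to the TASKQ_NOINSTANCE behavior of taskq_create():
 *
 *	tq = taskq_create_instance("example_drv_taskq",
 *	    ddi_get_instance(dip), 1, minclsyspri, 0, INT_MAX, 0);
 */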
1863 taskq_t *
1864 taskq_create_proc(const char *name, int nthreads, pri_t pri, int minalloc,
1865 int maxalloc, proc_t *proc, uint_t flags)
1867 ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1868 ASSERT(proc->p_flag & SSYS);
1870 return (taskq_create_common(name, 0, nthreads, pri, minalloc,
1871 maxalloc, proc, 0, flags | TASKQ_NOINSTANCE));
1874 taskq_t *
1875 taskq_create_sysdc(const char *name, int nthreads, int minalloc,
1876 int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
1878 ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1879 ASSERT(proc->p_flag & SSYS);
1881 return (taskq_create_common(name, 0, nthreads, minclsyspri, minalloc,
1882 maxalloc, proc, dc, flags | TASKQ_NOINSTANCE | TASKQ_DUTY_CYCLE));
1885 static taskq_t *
1886 taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
1887 int minalloc, int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
1889 taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
1890 uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
1891 uint_t bsize; /* # of buckets - always power of 2 */
1892 int max_nthreads;
1895 * TASKQ_DYNAMIC, TASKQ_CPR_SAFE and TASKQ_THREADS_CPU_PCT are all
1896 * mutually incompatible.
1898 IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_CPR_SAFE));
1899 IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_THREADS_CPU_PCT));
1900 IMPLY((flags & TASKQ_CPR_SAFE), !(flags & TASKQ_THREADS_CPU_PCT));
1902 /* Cannot have DYNAMIC with DUTY_CYCLE */
1903 IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_DUTY_CYCLE));
1905 /* Cannot have DUTY_CYCLE with a p0 kernel process */
1906 IMPLY((flags & TASKQ_DUTY_CYCLE), proc != &p0);
1908 /* Cannot have DC_BATCH without DUTY_CYCLE */
1909 ASSERT((flags & (TASKQ_DUTY_CYCLE|TASKQ_DC_BATCH)) != TASKQ_DC_BATCH);
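/*
 * For example, passing TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT would trip
 * the second IMPLY() above on a DEBUG kernel; at most one of
 * TASKQ_DYNAMIC, TASKQ_CPR_SAFE and TASKQ_THREADS_CPU_PCT may be set.
 */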
1911 ASSERT(proc != NULL);
1913 bsize = 1 << (highbit(ncpus) - 1);
1914 ASSERT(bsize >= 1);
1915 bsize = MIN(bsize, taskq_maxbuckets);
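/*
 * For example, with ncpus == 6, highbit(6) == 3 and so bsize == 4: the
 * largest power of two not exceeding the CPU count, further capped at
 * taskq_maxbuckets.
 */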
1917 if (flags & TASKQ_DYNAMIC) {
1918 ASSERT3S(nthreads, >=, 1);
1919 tq->tq_maxsize = nthreads;
1921 /* For dynamic task queues use just one backup thread */
1922 nthreads = max_nthreads = 1;
1924 } else if (flags & TASKQ_THREADS_CPU_PCT) {
1925 uint_t pct;
1926 ASSERT3S(nthreads, >=, 0);
1927 pct = nthreads;
1929 if (pct > taskq_cpupct_max_percent)
1930 pct = taskq_cpupct_max_percent;
1933 * If you're using THREADS_CPU_PCT, the process for the
1934 * taskq threads must be curproc. This allows any pset
1935 * binding to be inherited correctly. If proc is &p0,
1936 * we won't be creating LWPs, so new threads will be assigned
1937 * to the default processor set.
1939 ASSERT(curproc == proc || proc == &p0);
1940 tq->tq_threads_ncpus_pct = pct;
1941 nthreads = 1; /* corrected in taskq_thread_create() */
1942 max_nthreads = TASKQ_THREADS_PCT(max_ncpus, pct);
1944 } else {
1945 ASSERT3S(nthreads, >=, 1);
1946 max_nthreads = nthreads;
1949 if (max_nthreads < taskq_minimum_nthreads_max)
1950 max_nthreads = taskq_minimum_nthreads_max;
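/*
 * For example, a TASKQ_THREADS_CPU_PCT caller asking for 50 on a machine
 * with max_ncpus == 8 gets max_nthreads from TASKQ_THREADS_PCT(8, 50),
 * roughly half the CPUs; the result is then raised to at least
 * taskq_minimum_nthreads_max, just like the fixed-count case above.
 */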
1953 * Make sure the name is 0-terminated and conforms to the rules for
1954 * C identifiers.
1956 (void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
1957 strident_canon(tq->tq_name, TASKQ_NAMELEN + 1);
1959 tq->tq_flags = flags | TASKQ_CHANGING;
1960 tq->tq_active = 0;
1961 tq->tq_instance = instance;
1962 tq->tq_nthreads_target = nthreads;
1963 tq->tq_nthreads_max = max_nthreads;
1964 tq->tq_minalloc = minalloc;
1965 tq->tq_maxalloc = maxalloc;
1966 tq->tq_nbuckets = bsize;
1967 tq->tq_proc = proc;
1968 tq->tq_pri = pri;
1969 tq->tq_DC = dc;
1970 list_link_init(&tq->tq_cpupct_link);
1972 if (max_nthreads > 1)
1973 tq->tq_threadlist = kmem_alloc(
1974 sizeof (kthread_t *) * max_nthreads, KM_SLEEP);
1976 mutex_enter(&tq->tq_lock);
1977 if (flags & TASKQ_PREPOPULATE) {
1978 while (minalloc-- > 0)
1979 taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
1983 * Before we start creating threads for this taskq, take a
1984 * zone hold so the zone can't go away before taskq_destroy
1985 * makes sure all the taskq threads are gone. This hold is
1986 * similar in purpose to those taken by zthread_create().
1988 zone_hold(tq->tq_proc->p_zone);
1991 * Create the first thread, which will create any other threads
1992 * necessary. taskq_thread_create will not return until we have
1993 * enough threads to be able to process requests.
1995 taskq_thread_create(tq);
1996 mutex_exit(&tq->tq_lock);
1998 if (flags & TASKQ_DYNAMIC) {
1999 taskq_bucket_t *bucket = kmem_zalloc(sizeof (taskq_bucket_t) *
2000 bsize, KM_SLEEP);
2001 int b_id;
2003 tq->tq_buckets = bucket;
2005 /* Initialize each bucket */
2006 for (b_id = 0; b_id < bsize; b_id++, bucket++) {
2007 mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
2008 NULL);
2009 cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
2010 bucket->tqbucket_taskq = tq;
2011 bucket->tqbucket_freelist.tqent_next =
2012 bucket->tqbucket_freelist.tqent_prev =
2013 &bucket->tqbucket_freelist;
2014 if (flags & TASKQ_PREPOPULATE)
2015 taskq_bucket_extend(bucket);
2020 * Install kstats.
2021 * We have two cases:
2022 * 1) Instance is provided to taskq_create_instance(). In this case it
2023 * should be >= 0 and we use it.
2025 * 2) Instance is not provided and is generated automatically from taskq_id_arena.
2027 if (flags & TASKQ_NOINSTANCE) {
2028 instance = tq->tq_instance =
2029 (int)(uintptr_t)vmem_alloc(taskq_id_arena, 1, VM_SLEEP);
2032 if (flags & TASKQ_DYNAMIC) {
2033 if ((tq->tq_kstat = kstat_create("unix", instance,
2034 tq->tq_name, "taskq_d", KSTAT_TYPE_NAMED,
2035 sizeof (taskq_d_kstat) / sizeof (kstat_named_t),
2036 KSTAT_FLAG_VIRTUAL)) != NULL) {
2037 tq->tq_kstat->ks_lock = &taskq_d_kstat_lock;
2038 tq->tq_kstat->ks_data = &taskq_d_kstat;
2039 tq->tq_kstat->ks_update = taskq_d_kstat_update;
2040 tq->tq_kstat->ks_private = tq;
2041 kstat_install(tq->tq_kstat);
2043 } else {
2044 if ((tq->tq_kstat = kstat_create("unix", instance, tq->tq_name,
2045 "taskq", KSTAT_TYPE_NAMED,
2046 sizeof (taskq_kstat) / sizeof (kstat_named_t),
2047 KSTAT_FLAG_VIRTUAL)) != NULL) {
2048 tq->tq_kstat->ks_lock = &taskq_kstat_lock;
2049 tq->tq_kstat->ks_data = &taskq_kstat;
2050 tq->tq_kstat->ks_update = taskq_kstat_update;
2051 tq->tq_kstat->ks_private = tq;
2052 kstat_install(tq->tq_kstat);
2056 return (tq);
2060 * taskq_destroy().
2062 * Assumes: by the time taskq_destroy is called no one will use this task queue
2063 * in any way and no one will try to dispatch entries in it.
2065 void
2066 taskq_destroy(taskq_t *tq)
2068 taskq_bucket_t *b = tq->tq_buckets;
2069 int bid = 0;
2071 ASSERT(! (tq->tq_flags & TASKQ_CPR_SAFE));
2074 * Destroy kstats.
2076 if (tq->tq_kstat != NULL) {
2077 kstat_delete(tq->tq_kstat);
2078 tq->tq_kstat = NULL;
2082 * Destroy instance if needed.
2084 if (tq->tq_flags & TASKQ_NOINSTANCE) {
2085 vmem_free(taskq_id_arena, (void *)(uintptr_t)(tq->tq_instance), 1);
2087 tq->tq_instance = 0;
2091 * Unregister from the cpupct list.
2093 if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
2094 taskq_cpupct_remove(tq);
2098 * Wait for any pending entries to complete.
2100 taskq_wait(tq);
2102 mutex_enter(&tq->tq_lock);
2103 ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
2104 (tq->tq_active == 0));
2106 /* notify all the threads that they need to exit */
2107 tq->tq_nthreads_target = 0;
2109 tq->tq_flags |= TASKQ_CHANGING;
2110 cv_broadcast(&tq->tq_dispatch_cv);
2111 cv_broadcast(&tq->tq_exit_cv);
2113 while (tq->tq_nthreads != 0)
2114 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
2116 if (tq->tq_nthreads_max != 1)
2117 kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
2118 tq->tq_nthreads_max);
2120 tq->tq_minalloc = 0;
2121 while (tq->tq_nalloc != 0)
2122 taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
2124 mutex_exit(&tq->tq_lock);
2127 * Mark each bucket as closing and wake up all sleeping threads.
2129 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
2130 taskq_ent_t *tqe;
2132 mutex_enter(&b->tqbucket_lock);
2134 b->tqbucket_flags |= TQBUCKET_CLOSE;
2135 /* Wake up all sleeping threads */
2137 for (tqe = b->tqbucket_freelist.tqent_next;
2138 tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
2139 cv_signal(&tqe->tqent_cv);
2141 ASSERT(b->tqbucket_nalloc == 0);
2144 * At this point we have waited for all pending jobs to complete (in
2145 * both the task queue and the bucket) and no new jobs should
2146 * arrive. Wait for all threads to die.
2148 while (b->tqbucket_nfree > 0)
2149 cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
2150 mutex_exit(&b->tqbucket_lock);
2151 mutex_destroy(&b->tqbucket_lock);
2152 cv_destroy(&b->tqbucket_cv);
2155 if (tq->tq_buckets != NULL) {
2156 ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
2157 kmem_free(tq->tq_buckets,
2158 sizeof (taskq_bucket_t) * tq->tq_nbuckets);
2160 /* Cleanup fields before returning tq to the cache */
2161 tq->tq_buckets = NULL;
2162 tq->tq_tcreates = 0;
2163 tq->tq_tdeaths = 0;
2164 } else {
2165 ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
2169 * Now that all the taskq threads are gone, we can
2170 * drop the zone hold taken in taskq_create_common().
2172 zone_rele(tq->tq_proc->p_zone);
2174 tq->tq_threads_ncpus_pct = 0;
2175 tq->tq_totaltime = 0;
2176 tq->tq_tasks = 0;
2177 tq->tq_maxtasks = 0;
2178 tq->tq_executed = 0;
2179 kmem_cache_free(taskq_cache, tq);
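/*
 * Teardown sketch for the assumption stated in taskq_destroy()'s header
 * comment (the example_* and "sc" names are hypothetical): the caller
 * must first stop every source of new dispatches, and only then destroy
 * the queue, which itself waits for work already dispatched:
 *
 *	example_disable_dispatchers(sc);
 *	taskq_destroy(sc->sc_tq);
 *	sc->sc_tq = NULL;
 */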
2183 * Extend a bucket with a new entry on the free list and attach a worker thread
2184 * to it.
2186 * Argument: pointer to the bucket.
2188 * This function may quietly fail. Its callers (taskq_dispatch() and the
2189 * TASKQ_PREPOPULATE path in taskq_create_common()) handle such failures properly.
2191 static void
2192 taskq_bucket_extend(void *arg)
2194 taskq_ent_t *tqe;
2195 taskq_bucket_t *b = (taskq_bucket_t *)arg;
2196 taskq_t *tq = b->tqbucket_taskq;
2197 int nthreads;
2199 mutex_enter(&tq->tq_lock);
2201 if (! ENOUGH_MEMORY()) {
2202 tq->tq_nomem++;
2203 mutex_exit(&tq->tq_lock);
2204 return;
2208 * Observe global taskq limits on the number of threads.
2210 if (tq->tq_tcreates++ - tq->tq_tdeaths > tq->tq_maxsize) {
2211 tq->tq_tcreates--;
2212 mutex_exit(&tq->tq_lock);
2213 return;
2215 mutex_exit(&tq->tq_lock);
2217 tqe = kmem_cache_alloc(taskq_ent_cache, KM_NOSLEEP);
2219 if (tqe == NULL) {
2220 mutex_enter(&tq->tq_lock);
2221 tq->tq_nomem++;
2222 tq->tq_tcreates--;
2223 mutex_exit(&tq->tq_lock);
2224 return;
2227 ASSERT(tqe->tqent_thread == NULL);
2229 tqe->tqent_un.tqent_bucket = b;
2232 * Create a thread in a TS_STOPPED state first. If it is successfully
2233 * created, place the entry on the free list and start the thread.
2235 tqe->tqent_thread = thread_create(NULL, 0, taskq_d_thread, tqe,
2236 0, tq->tq_proc, TS_STOPPED, tq->tq_pri);
2239 * Once the entry is ready, link it to the bucket free list.
2241 mutex_enter(&b->tqbucket_lock);
2242 tqe->tqent_func = NULL;
2243 TQ_APPEND(b->tqbucket_freelist, tqe);
2244 b->tqbucket_nfree++;
2245 TQ_STAT(b, tqs_tcreates);
2247 #if TASKQ_STATISTIC
2248 nthreads = b->tqbucket_stat.tqs_tcreates -
2249 b->tqbucket_stat.tqs_tdeaths;
2250 b->tqbucket_stat.tqs_maxthreads = MAX(nthreads,
2251 b->tqbucket_stat.tqs_maxthreads);
2252 #endif
2254 mutex_exit(&b->tqbucket_lock);
2256 * Start the stopped thread.
2258 thread_lock(tqe->tqent_thread);
2259 tqe->tqent_thread->t_taskq = tq;
2260 tqe->tqent_thread->t_schedflag |= TS_ALLSTART;
2261 setrun_locked(tqe->tqent_thread);
2262 thread_unlock(tqe->tqent_thread);
2265 static int
2266 taskq_kstat_update(kstat_t *ksp, int rw)
2268 struct taskq_kstat *tqsp = &taskq_kstat;
2269 taskq_t *tq = ksp->ks_private;
2271 if (rw == KSTAT_WRITE)
2272 return (EACCES);
2274 tqsp->tq_pid.value.ui64 = tq->tq_proc->p_pid;
2275 tqsp->tq_tasks.value.ui64 = tq->tq_tasks;
2276 tqsp->tq_executed.value.ui64 = tq->tq_executed;
2277 tqsp->tq_maxtasks.value.ui64 = tq->tq_maxtasks;
2278 tqsp->tq_totaltime.value.ui64 = tq->tq_totaltime;
2279 tqsp->tq_nactive.value.ui64 = tq->tq_active;
2280 tqsp->tq_nalloc.value.ui64 = tq->tq_nalloc;
2281 tqsp->tq_pri.value.ui64 = tq->tq_pri;
2282 tqsp->tq_nthreads.value.ui64 = tq->tq_nthreads;
2283 tqsp->tq_nomem.value.ui64 = tq->tq_nomem;
2284 return (0);
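/*
 * The counters filled in above surface through the kstat created in
 * taskq_create_common() (module "unix", class "taskq", or "taskq_d" for
 * dynamic queues), so they can be inspected from userland with
 * kstat(1M), e.g. "kstat -m unix -c taskq".
 */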
2287 static int
2288 taskq_d_kstat_update(kstat_t *ksp, int rw)
2290 struct taskq_d_kstat *tqsp = &taskq_d_kstat;
2291 taskq_t *tq = ksp->ks_private;
2292 taskq_bucket_t *b = tq->tq_buckets;
2293 int bid = 0;
2295 if (rw == KSTAT_WRITE)
2296 return (EACCES);
2298 ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
2300 tqsp->tqd_btasks.value.ui64 = tq->tq_tasks;
2301 tqsp->tqd_bexecuted.value.ui64 = tq->tq_executed;
2302 tqsp->tqd_bmaxtasks.value.ui64 = tq->tq_maxtasks;
2303 tqsp->tqd_bnalloc.value.ui64 = tq->tq_nalloc;
2304 tqsp->tqd_bnactive.value.ui64 = tq->tq_active;
2305 tqsp->tqd_btotaltime.value.ui64 = tq->tq_totaltime;
2306 tqsp->tqd_pri.value.ui64 = tq->tq_pri;
2307 tqsp->tqd_nomem.value.ui64 = tq->tq_nomem;
2309 tqsp->tqd_hits.value.ui64 = 0;
2310 tqsp->tqd_misses.value.ui64 = 0;
2311 tqsp->tqd_overflows.value.ui64 = 0;
2312 tqsp->tqd_tcreates.value.ui64 = 0;
2313 tqsp->tqd_tdeaths.value.ui64 = 0;
2314 tqsp->tqd_maxthreads.value.ui64 = 0;
2316 tqsp->tqd_disptcreates.value.ui64 = 0;
2317 tqsp->tqd_totaltime.value.ui64 = 0;
2318 tqsp->tqd_nalloc.value.ui64 = 0;
2319 tqsp->tqd_nfree.value.ui64 = 0;
2321 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
2322 tqsp->tqd_hits.value.ui64 += b->tqbucket_stat.tqs_hits;
2323 tqsp->tqd_misses.value.ui64 += b->tqbucket_stat.tqs_misses;
2324 tqsp->tqd_overflows.value.ui64 += b->tqbucket_stat.tqs_overflow;
2325 tqsp->tqd_tcreates.value.ui64 += b->tqbucket_stat.tqs_tcreates;
2326 tqsp->tqd_tdeaths.value.ui64 += b->tqbucket_stat.tqs_tdeaths;
2327 tqsp->tqd_maxthreads.value.ui64 +=
2328 b->tqbucket_stat.tqs_maxthreads;
2329 tqsp->tqd_disptcreates.value.ui64 +=
2330 b->tqbucket_stat.tqs_disptcreates;
2331 tqsp->tqd_totaltime.value.ui64 += b->tqbucket_totaltime;
2332 tqsp->tqd_nalloc.value.ui64 += b->tqbucket_nalloc;
2333 tqsp->tqd_nfree.value.ui64 += b->tqbucket_nfree;
2335 return (0);