2 * Copyright (c) 2000 Doug Rabson
3 * Copyright (c) 2014 Jeff Roberson
4 * Copyright (c) 2016 Matthew Macy
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/cpumask.h>
34 #include <sys/kernel.h>
35 #include <sys/libkern.h>
36 #include <sys/limits.h>
38 #include <sys/malloc.h>
40 #include <sys/sched.h>
41 #include <sys/gtaskqueue.h>
42 #include <sys/unistd.h>
43 #include <machine/stdarg.h>
/* Malloc tag for all group-taskqueue allocations. */
45 static MALLOC_DEFINE(M_GTASKQUEUE
, "gtaskqueue", "Group Task Queues");
/* Forward declarations for the queue worker machinery below. */
46 static void gtaskqueue_thread_enqueue(void *);
47 static void gtaskqueue_thread_loop(void *arg
);
/* Returns non-zero while `gtask' is being executed by some worker. */
48 static int task_is_running(struct gtaskqueue
*queue
, struct gtask
*gtask
);
/* Sleeps until `gtask' is neither queued nor running; lock held. */
49 static void gtaskqueue_drain_locked(struct gtaskqueue
*queue
, struct gtask
*gtask
);
/* One softirq taskqueue group, one queue per CPU, stride 1. */
51 TASKQGROUP_DEFINE(softirq
, ncpus
, 1);
/*
 * Per-worker record of the task currently executing, linked on the
 * queue's tq_active list so cancel/drain can spot in-flight tasks.
 */
53 struct gtaskqueue_busy
{
54 struct gtask
*tb_running
;
56 LIST_ENTRY(gtaskqueue_busy
) tb_link
;
/* Callback used to kick the queue when work is enqueued. */
59 typedef void (*gtaskqueue_enqueue_fn
)(void *context
);
/*
 * Fields of struct gtaskqueue (NOTE(review): the struct header and some
 * members are missing from this extract -- confirm against history).
 */
62 STAILQ_HEAD(, gtask
) tq_queue
;
63 LIST_HEAD(, gtaskqueue_busy
) tq_active
;
67 gtaskqueue_enqueue_fn tq_enqueue
;
70 struct thread
**tq_threads
;
/* Init/shutdown callbacks and their contexts, indexed by type. */
74 taskqueue_callback_fn tq_callbacks
[TASKQUEUE_NUM_CALLBACKS
];
75 void *tq_cb_contexts
[TASKQUEUE_NUM_CALLBACKS
];
/* tq_flags bits: queue accepting work / dispatch blocked / lockless kick. */
79 #define TQ_FLAGS_ACTIVE (1 << 0)
80 #define TQ_FLAGS_BLOCKED (1 << 1)
81 #define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2)
/* Timeout-task flag: a callout is pending for this task. */
83 #define DT_CALLOUT_ARMED (1 << 0)
/*
 * Helpers for the per-queue lockmgr lock.
 *
 * Fixes vs. original: TQ_UNLOCK() carried a trailing semicolon inside the
 * macro body (a classic if/else hazard and a stray empty statement at every
 * call site), and TQ_ASSERT_UNLOCKED() called lockstatus() with a single
 * argument while TQ_ASSERT_LOCKED() correctly passes (lock, NULL).
 */
#define TQ_LOCK(tq)		lockmgr(&(tq)->tq_lock, LK_EXCLUSIVE)
#define TQ_ASSERT_LOCKED(tq)	KKASSERT(lockstatus(&(tq)->tq_lock, NULL) != 0)
#define TQ_UNLOCK(tq)		lockmgr(&(tq)->tq_lock, LK_RELEASE)
#define TQ_ASSERT_UNLOCKED(tq)	KKASSERT(lockstatus(&(tq)->tq_lock, NULL) == 0)
92 gtask_dump(struct gtask
*gtask
)
94 kprintf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p "
96 gtask
, gtask
->ta_flags
, gtask
->ta_priority
,
97 gtask
->ta_func
, gtask
->ta_context
);
102 TQ_SLEEP(struct gtaskqueue
*tq
, void *p
, const char *wm
)
104 return (lksleep(p
, &tq
->tq_lock
, 0, wm
, 0));
107 static struct gtaskqueue
*
108 _gtaskqueue_create(const char *name
, int mflags
,
109 taskqueue_enqueue_fn enqueue
, void *context
,
110 int lkflags
, const char *mtxname __unused
)
112 struct gtaskqueue
*queue
;
114 queue
= kmalloc(sizeof(struct gtaskqueue
),
115 M_GTASKQUEUE
, mflags
| M_ZERO
);
117 kprintf("_gtaskqueue_create: kmalloc failed %08x\n", mflags
);
121 STAILQ_INIT(&queue
->tq_queue
);
122 LIST_INIT(&queue
->tq_active
);
123 queue
->tq_enqueue
= enqueue
;
124 queue
->tq_context
= context
;
125 queue
->tq_name
= name
? name
: "taskqueue";
126 queue
->tq_flags
|= TQ_FLAGS_ACTIVE
;
127 if (enqueue
== gtaskqueue_thread_enqueue
)
128 queue
->tq_flags
|= TQ_FLAGS_UNLOCKED_ENQUEUE
;
129 lockinit(&queue
->tq_lock
, queue
->tq_name
, 0, 0);
135 * Signal a taskqueue thread to terminate.
138 gtaskqueue_terminate(struct thread
**pp
, struct gtaskqueue
*tq
)
141 while (tq
->tq_tcount
> 0 || tq
->tq_callouts
> 0) {
143 TQ_SLEEP(tq
, pp
, "gtq_destroy");
148 gtaskqueue_free(struct gtaskqueue
*queue
)
152 queue
->tq_flags
&= ~TQ_FLAGS_ACTIVE
;
153 gtaskqueue_terminate(queue
->tq_threads
, queue
);
154 KASSERT(LIST_EMPTY(&queue
->tq_active
), ("Tasks still running?"));
155 KASSERT(queue
->tq_callouts
== 0, ("Armed timeout tasks"));
156 lockuninit(&queue
->tq_lock
);
157 kfree(queue
->tq_threads
, M_GTASKQUEUE
);
158 /*kfree(queue->tq_name, M_GTASKQUEUE);*/
159 kfree(queue
, M_GTASKQUEUE
);
163 * Wait for all to complete, then prevent it from being enqueued
166 grouptask_block(struct grouptask
*grouptask
)
168 struct gtaskqueue
*queue
= grouptask
->gt_taskqueue
;
169 struct gtask
*gtask
= &grouptask
->gt_task
;
174 panic("queue == NULL");
178 gtask
->ta_flags
|= TASK_NOENQUEUE
;
179 gtaskqueue_drain_locked(queue
, gtask
);
184 grouptask_unblock(struct grouptask
*grouptask
)
186 struct gtaskqueue
*queue
= grouptask
->gt_taskqueue
;
187 struct gtask
*gtask
= &grouptask
->gt_task
;
192 panic("queue == NULL");
196 gtask
->ta_flags
&= ~TASK_NOENQUEUE
;
201 grouptaskqueue_enqueue(struct gtaskqueue
*queue
, struct gtask
*gtask
)
206 panic("queue == NULL");
210 if (gtask
->ta_flags
& TASK_ENQUEUED
) {
214 if (gtask
->ta_flags
& TASK_NOENQUEUE
) {
218 STAILQ_INSERT_TAIL(&queue
->tq_queue
, gtask
, ta_link
);
219 gtask
->ta_flags
|= TASK_ENQUEUED
;
221 if ((queue
->tq_flags
& TQ_FLAGS_BLOCKED
) == 0)
222 queue
->tq_enqueue(queue
->tq_context
);
/*
 * No-op task callback; used as the payload of the drain barrier task.
 */
static void
gtaskqueue_task_nop_fn(void *context)
{
}
232 * Block until all currently queued tasks in this taskqueue
233 * have begun execution. Tasks queued during execution of
234 * this function are ignored.
237 gtaskqueue_drain_tq_queue(struct gtaskqueue
*queue
)
239 struct gtask t_barrier
;
241 if (STAILQ_EMPTY(&queue
->tq_queue
))
245 * Enqueue our barrier after all current tasks, but with
246 * the highest priority so that newly queued tasks cannot
247 * pass it. Because of the high priority, we can not use
248 * taskqueue_enqueue_locked directly (which drops the lock
249 * anyway) so just insert it at tail while we have the
252 GTASK_INIT(&t_barrier
, 0, USHRT_MAX
, gtaskqueue_task_nop_fn
, &t_barrier
);
253 STAILQ_INSERT_TAIL(&queue
->tq_queue
, &t_barrier
, ta_link
);
254 t_barrier
.ta_flags
|= TASK_ENQUEUED
;
257 * Once the barrier has executed, all previously queued tasks
258 * have completed or are currently executing.
260 while (t_barrier
.ta_flags
& TASK_ENQUEUED
)
261 TQ_SLEEP(queue
, &t_barrier
, "gtq_qdrain");
265 * Block until all currently executing tasks for this taskqueue
266 * complete. Tasks that begin execution during the execution
267 * of this function are ignored.
270 gtaskqueue_drain_tq_active(struct gtaskqueue
*queue
)
272 struct gtaskqueue_busy
*tb
;
275 if (LIST_EMPTY(&queue
->tq_active
))
278 /* Block taskq_terminate().*/
279 queue
->tq_callouts
++;
281 /* Wait for any active task with sequence from the past. */
284 LIST_FOREACH(tb
, &queue
->tq_active
, tb_link
) {
285 if ((int)(tb
->tb_seq
- seq
) <= 0) {
286 TQ_SLEEP(queue
, tb
->tb_running
, "gtq_adrain");
291 /* Release taskqueue_terminate(). */
292 queue
->tq_callouts
--;
293 if ((queue
->tq_flags
& TQ_FLAGS_ACTIVE
) == 0)
294 wakeup_one(queue
->tq_threads
);
298 gtaskqueue_block(struct gtaskqueue
*queue
)
302 queue
->tq_flags
|= TQ_FLAGS_BLOCKED
;
307 gtaskqueue_unblock(struct gtaskqueue
*queue
)
311 queue
->tq_flags
&= ~TQ_FLAGS_BLOCKED
;
312 if (!STAILQ_EMPTY(&queue
->tq_queue
))
313 queue
->tq_enqueue(queue
->tq_context
);
318 gtaskqueue_run_locked(struct gtaskqueue
*queue
)
320 struct gtaskqueue_busy tb
;
323 struct epoch_tracker et
;
327 KASSERT(queue
!= NULL
, ("tq is NULL"));
328 TQ_ASSERT_LOCKED(queue
);
329 tb
.tb_running
= NULL
;
330 LIST_INSERT_HEAD(&queue
->tq_active
, &tb
, tb_link
);
332 in_net_epoch
= false;
335 while ((gtask
= STAILQ_FIRST(&queue
->tq_queue
)) != NULL
) {
336 STAILQ_REMOVE_HEAD(&queue
->tq_queue
, ta_link
);
337 gtask
->ta_flags
&= ~TASK_ENQUEUED
;
338 tb
.tb_running
= gtask
;
339 tb
.tb_seq
= ++queue
->tq_seq
;
342 KASSERT(gtask
->ta_func
!= NULL
, ("task->ta_func is NULL"));
344 if (!in_net_epoch
&& TASK_IS_NET(gtask
)) {
347 } else if (in_net_epoch
&& !TASK_IS_NET(gtask
)) {
349 in_net_epoch
= false;
352 gtask
->ta_func(gtask
->ta_context
);
361 LIST_REMOVE(&tb
, tb_link
);
365 task_is_running(struct gtaskqueue
*queue
, struct gtask
*gtask
)
367 struct gtaskqueue_busy
*tb
;
369 TQ_ASSERT_LOCKED(queue
);
370 LIST_FOREACH(tb
, &queue
->tq_active
, tb_link
) {
371 if (tb
->tb_running
== gtask
)
378 gtaskqueue_cancel_locked(struct gtaskqueue
*queue
, struct gtask
*gtask
)
381 if (gtask
->ta_flags
& TASK_ENQUEUED
)
382 STAILQ_REMOVE(&queue
->tq_queue
, gtask
, gtask
, ta_link
);
383 gtask
->ta_flags
&= ~TASK_ENQUEUED
;
384 return (task_is_running(queue
, gtask
) ? EBUSY
: 0);
/*
 * Locked wrapper around gtaskqueue_cancel_locked(); returns 0 or EBUSY.
 */
int
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
{
	int error;

	TQ_LOCK(queue);
	error = gtaskqueue_cancel_locked(queue, gtask);
	TQ_UNLOCK(queue);

	return (error);
}
400 gtaskqueue_drain_locked(struct gtaskqueue
*queue
, struct gtask
*gtask
)
402 while ((gtask
->ta_flags
& TASK_ENQUEUED
) || task_is_running(queue
, gtask
))
403 TQ_SLEEP(queue
, gtask
, "gtq_drain");
/*
 * Locked wrapper around gtaskqueue_drain_locked().
 */
void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{
	TQ_LOCK(queue);
	gtaskqueue_drain_locked(queue, gtask);
	TQ_UNLOCK(queue);
}
/*
 * Drain everything: wait for all queued tasks to start, then for all
 * running tasks to finish.
 */
void
gtaskqueue_drain_all(struct gtaskqueue *queue)
{
	TQ_LOCK(queue);
	gtaskqueue_drain_tq_queue(queue);
	gtaskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}
/*
 * _gtaskqueue_start_threads() -- create `count' LWKT threads running
 * gtaskqueue_thread_loop() for the queue in *tqp and set their initial
 * priority.  NOTE(review): this extract is missing several original
 * lines (the error checks, lwkt_create() trailing arguments, and loop
 * closers); the surviving tokens are preserved verbatim below -- recover
 * the full text from history before editing.
 */
424 static int __printflike(4, 0)
425 _gtaskqueue_start_threads(struct gtaskqueue
**tqp
, int count
, int pri
,
426 const char *name
, __va_list ap
)
/* thread-name template formatted into a fixed MAXCOMLEN buffer */
428 char ktname
[MAXCOMLEN
+ 1];
430 struct gtaskqueue
*tq
;
436 kvsnprintf(ktname
, sizeof(ktname
), name
, ap
);
/* one thread-pointer slot per requested worker, zeroed */
439 tq
->tq_threads
= kmalloc(sizeof(struct thread
*) * count
,
440 M_GTASKQUEUE
, M_WAITOK
| M_ZERO
);
442 for (i
= 0; i
< count
; i
++) {
/* two lwkt_create() variants -- presumably single vs. named-per-index */
445 error
= lwkt_create(gtaskqueue_thread_loop
, tqp
,
446 &tq
->tq_threads
[i
], NULL
,
450 error
= lwkt_create(gtaskqueue_thread_loop
, tqp
,
451 &tq
->tq_threads
[i
], NULL
,
456 /* should be ok to continue, taskqueue_free will dtrt */
457 kprintf("%s: lwkt_create(%s): error %d",
458 __func__
, ktname
, error
);
459 tq
->tq_threads
[i
] = NULL
; /* paranoid */
/* set the initial priority on every thread that was created */
463 for (i
= 0; i
< count
; i
++) {
464 if (tq
->tq_threads
[i
] == NULL
)
466 td
= tq
->tq_threads
[i
];
467 lwkt_setpri_initial(td
, pri
);
474 static int __printflike(4, 5)
475 gtaskqueue_start_threads(struct gtaskqueue
**tqp
, int count
, int pri
,
476 const char *name
, ...)
481 __va_start(ap
, name
);
482 error
= _gtaskqueue_start_threads(tqp
, count
, pri
, name
, ap
);
489 gtaskqueue_run_callback(struct gtaskqueue
*tq
,
490 enum taskqueue_callback_type cb_type
)
492 taskqueue_callback_fn tq_callback
;
494 TQ_ASSERT_UNLOCKED(tq
);
495 tq_callback
= tq
->tq_callbacks
[cb_type
];
496 if (tq_callback
!= NULL
)
497 tq_callback(tq
->tq_cb_contexts
[cb_type
]);
502 gtaskqueue_thread_loop(void *arg
)
504 struct gtaskqueue
**tqp
, *tq
;
509 gtaskqueue_run_callback(tq
, TASKQUEUE_CALLBACK_TYPE_INIT
);
512 while ((tq
->tq_flags
& TQ_FLAGS_ACTIVE
) != 0) {
514 gtaskqueue_run_locked(tq
);
516 * Because taskqueue_run() can drop tq_mutex, we need to
517 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
518 * meantime, which means we missed a wakeup.
520 if ((tq
->tq_flags
& TQ_FLAGS_ACTIVE
) == 0)
522 TQ_SLEEP(tq
, tq
, "-");
524 gtaskqueue_run_locked(tq
);
526 * This thread is on its way out, so just drop the lock temporarily
527 * in order to call the shutdown callback. This allows the callback
528 * to look at the taskqueue, even just before it dies.
532 gtaskqueue_run_callback(tq
, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN
);
536 /* rendezvous with thread that asked us to terminate */
538 wakeup_one(tq
->tq_threads
);
/*
 * Enqueue hook for thread-backed queues: wake one sleeping worker.
 */
static void
gtaskqueue_thread_enqueue(void *context)
{
	struct gtaskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}
554 * NOTE: FreeBSD uses MTX_SPIN locks, which doesn't make a whole lot
555 * of sense (over-use of spin-locks in general). In DFly we
556 * want to use blockable locks for almost everything.
558 static struct gtaskqueue
*
559 gtaskqueue_create_fast(const char *name
, int mflags
,
560 taskqueue_enqueue_fn enqueue
, void *context
)
562 return _gtaskqueue_create(name
, mflags
, enqueue
, context
,
563 0, "fast_taskqueue");
/*
 * Per-CPU slice of a taskqgroup: the grouptasks assigned to it and the
 * backing taskqueue.  (NOTE(review): some members and the taskqgroup
 * struct header are missing from this extract.)
 */
566 struct taskqgroup_cpu
{
567 LIST_HEAD(, grouptask
) tgc_tasks
;
568 struct gtaskqueue
*tgc_taskq
;
/* Fields of struct taskqgroup: per-cpu queues, group lock, name. */
574 struct taskqgroup_cpu tqg_queue
[MAXCPU
];
575 struct lock tqg_lock
;
576 const char * tqg_name
;
/* Helper task used to migrate a worker thread to its assigned cpu. */
580 struct taskq_bind_task
{
581 struct gtask bt_task
;
586 taskqgroup_cpu_create(struct taskqgroup
*qgroup
, int idx
, int cpu
)
588 struct taskqgroup_cpu
*qcpu
;
590 qcpu
= &qgroup
->tqg_queue
[idx
];
591 LIST_INIT(&qcpu
->tgc_tasks
);
592 qcpu
->tgc_taskq
= gtaskqueue_create_fast(NULL
, M_WAITOK
,
593 gtaskqueue_thread_enqueue
,
595 gtaskqueue_start_threads(&qcpu
->tgc_taskq
, 1, TDPRI_KERN_DAEMON
,
596 "%s_%d", qgroup
->tqg_name
, idx
);
601 * Find the taskq with least # of tasks that doesn't currently have any
602 * other queues from the uniq identifier.
605 taskqgroup_find(struct taskqgroup
*qgroup
, void *uniq
)
611 KKASSERT(lockstatus(&qgroup
->tqg_lock
, NULL
) != 0);
612 KASSERT(qgroup
->tqg_cnt
!= 0,
613 ("qgroup %s has no queues", qgroup
->tqg_name
));
616 * Two passes: first scan for a queue with the least tasks that
617 * does not already service this uniq id. If that fails simply find
618 * the queue with the least total tasks.
620 for (idx
= -1, mincnt
= INT_MAX
, strict
= 1; mincnt
== INT_MAX
;
622 for (i
= 0; i
< qgroup
->tqg_cnt
; i
++) {
623 if (qgroup
->tqg_queue
[i
].tgc_cnt
> mincnt
)
626 LIST_FOREACH(n
, &qgroup
->tqg_queue
[i
].tgc_tasks
,
628 if (n
->gt_uniq
== uniq
)
633 mincnt
= qgroup
->tqg_queue
[i
].tgc_cnt
;
638 panic("%s: failed to pick a qid.", __func__
);
644 taskqgroup_attach(struct taskqgroup
*qgroup
, struct grouptask
*gtask
,
645 void *uniq
, device_t dev
, struct resource
*irq
, const char *name
)
649 KASSERT(qgroup
->tqg_cnt
> 0,
650 ("qgroup %s has no queues", qgroup
->tqg_name
));
652 gtask
->gt_uniq
= uniq
;
653 ksnprintf(gtask
->gt_name
, GROUPTASK_NAMELEN
, "%s", name
? name
: "grouptask");
657 lockmgr(&qgroup
->tqg_lock
, LK_EXCLUSIVE
);
658 qid
= taskqgroup_find(qgroup
, uniq
);
659 qgroup
->tqg_queue
[qid
].tgc_cnt
++;
660 LIST_INSERT_HEAD(&qgroup
->tqg_queue
[qid
].tgc_tasks
, gtask
, gt_list
);
661 gtask
->gt_taskqueue
= qgroup
->tqg_queue
[qid
].tgc_taskq
;
662 if (dev
!= NULL
&& irq
!= NULL
) {
663 cpu
= qgroup
->tqg_queue
[qid
].tgc_cpu
;
665 lockmgr(&qgroup
->tqg_lock
, LK_RELEASE
);
668 * XXX FreeBSD created a mess by separating out the cpu
669 * binding from bus_setup_intr(). Punt for now.
671 error
= bus_bind_intr(dev
, irq
, cpu
);
676 kprintf("%s: binding interrupt failed for %s: %d\n",
677 __func__
, gtask
->gt_name
, error
);
679 lockmgr(&qgroup
->tqg_lock
, LK_RELEASE
);
684 taskqgroup_attach_cpu(struct taskqgroup
*qgroup
, struct grouptask
*gtask
,
685 void *uniq
, int cpu
, device_t dev
, struct resource
*irq
, const char *name
)
689 gtask
->gt_uniq
= uniq
;
690 ksnprintf(gtask
->gt_name
, GROUPTASK_NAMELEN
, "%s", name
? name
: "grouptask");
694 lockmgr(&qgroup
->tqg_lock
, LK_EXCLUSIVE
);
695 for (i
= 0, qid
= -1; i
< qgroup
->tqg_cnt
; i
++) {
696 if (qgroup
->tqg_queue
[i
].tgc_cpu
== cpu
) {
702 lockmgr(&qgroup
->tqg_lock
, LK_RELEASE
);
703 kprintf("%s: qid not found for %s cpu=%d\n",
704 __func__
, gtask
->gt_name
, cpu
);
707 qgroup
->tqg_queue
[qid
].tgc_cnt
++;
708 LIST_INSERT_HEAD(&qgroup
->tqg_queue
[qid
].tgc_tasks
, gtask
, gt_list
);
709 gtask
->gt_taskqueue
= qgroup
->tqg_queue
[qid
].tgc_taskq
;
710 cpu
= qgroup
->tqg_queue
[qid
].tgc_cpu
;
711 lockmgr(&qgroup
->tqg_lock
, LK_RELEASE
);
713 if (dev
!= NULL
&& irq
!= NULL
) {
716 * XXX FreeBSD created a mess by separating out the cpu
717 * binding from bus_setup_intr(). Punt for now.
719 error
= bus_bind_intr(dev
, irq
, cpu
);
724 kprintf("%s: binding interrupt failed for %s: %d\n",
725 __func__
, gtask
->gt_name
, error
);
732 taskqgroup_detach(struct taskqgroup
*qgroup
, struct grouptask
*gtask
)
736 grouptask_block(gtask
);
737 lockmgr(&qgroup
->tqg_lock
, LK_EXCLUSIVE
);
738 for (i
= 0; i
< qgroup
->tqg_cnt
; i
++)
739 if (qgroup
->tqg_queue
[i
].tgc_taskq
== gtask
->gt_taskqueue
)
741 if (i
== qgroup
->tqg_cnt
)
742 panic("%s: task %s not in group", __func__
, gtask
->gt_name
);
743 qgroup
->tqg_queue
[i
].tgc_cnt
--;
744 LIST_REMOVE(gtask
, gt_list
);
745 lockmgr(&qgroup
->tqg_lock
, LK_RELEASE
);
746 gtask
->gt_taskqueue
= NULL
;
747 gtask
->gt_task
.ta_flags
&= ~TASK_NOENQUEUE
;
751 taskqgroup_binder(void *ctx
)
753 struct taskq_bind_task
*gtask
;
756 lwkt_migratecpu(gtask
->bt_cpuid
);
757 kfree(gtask
, M_DEVBUF
);
761 taskqgroup_bind(struct taskqgroup
*qgroup
)
763 struct taskq_bind_task
*gtask
;
767 * Bind taskqueue threads to specific CPUs, if they have been assigned
770 if (qgroup
->tqg_cnt
== 1)
773 for (i
= 0; i
< qgroup
->tqg_cnt
; i
++) {
774 gtask
= kmalloc(sizeof(*gtask
), M_DEVBUF
, M_WAITOK
);
775 GTASK_INIT(>ask
->bt_task
, 0, 0, taskqgroup_binder
, gtask
);
776 gtask
->bt_cpuid
= qgroup
->tqg_queue
[i
].tgc_cpu
;
777 grouptaskqueue_enqueue(qgroup
->tqg_queue
[i
].tgc_taskq
,
783 taskqgroup_create(const char *name
, int cnt
, int stride
)
785 struct taskqgroup
*qgroup
;
788 qgroup
= kmalloc(sizeof(*qgroup
), M_GTASKQUEUE
, M_WAITOK
| M_ZERO
);
789 lockinit(&qgroup
->tqg_lock
, "taskqgroup", 0, 0);
790 qgroup
->tqg_name
= name
;
791 qgroup
->tqg_cnt
= cnt
;
793 for (cpu
= i
= 0; i
< cnt
; i
++) {
794 taskqgroup_cpu_create(qgroup
, i
, cpu
);
795 for (j
= 0; j
< stride
; j
++)
796 cpu
= (cpu
+ 1) % ncpus
;
802 taskqgroup_destroy(struct taskqgroup
*qgroup
)
807 taskqgroup_drain_all(struct taskqgroup
*tqg
)
809 struct gtaskqueue
*q
;
811 for (int i
= 0; i
< ncpus
; i
++) {
812 q
= tqg
->tqg_queue
[i
].tgc_taskq
;
815 gtaskqueue_drain_all(q
);