/*
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.1.2.3 2003/09/10 00:40:39 ken Exp $
 * $DragonFly: src/sys/kern/subr_taskqueue.c,v 1.13 2008/06/07 11:44:04 mneumann Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/thread2.h>

MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;

struct taskqueue {
        STAILQ_ENTRY(taskqueue) tq_link;
        STAILQ_HEAD(, task)     tq_queue;
        const char              *tq_name;
        taskqueue_enqueue_fn    tq_enqueue;
        void                    *tq_context;
        int                     tq_draining;
};

struct taskqueue *
taskqueue_create(const char *name, int mflags,
                 taskqueue_enqueue_fn enqueue, void *context)
{
        struct taskqueue *queue;
        static int once = 1;

        queue = kmalloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags);
        if (!queue)
                return NULL;
        STAILQ_INIT(&queue->tq_queue);
        queue->tq_name = name;
        queue->tq_enqueue = enqueue;
        queue->tq_context = context;
        queue->tq_draining = 0;

        crit_enter();
        if (once) {
                STAILQ_INIT(&taskqueue_queues);
                once = 0;
        }
        STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
        crit_exit();

        return queue;
}

void
taskqueue_free(struct taskqueue *queue)
{
        crit_enter();
        queue->tq_draining = 1;
        crit_exit();

        taskqueue_run(queue);

        crit_enter();
        STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
        crit_exit();

        kfree(queue, M_TASKQUEUE);
}

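/*
 * Illustrative sketch (not part of the original file): a driver creating and
 * later destroying a private taskqueue.  The names ``mydrv_tq'',
 * ``mydrv_tq_enqueue'', ``mydrv_attach'' and ``mydrv_detach'' are
 * hypothetical; only taskqueue_create(), taskqueue_free() and the
 * taskqueue_enqueue_fn callback shape come from this file.
 */
#if 0
static struct taskqueue *mydrv_tq;

/* Enqueue hook: called whenever a task is queued; kick the consumer. */
static void
mydrv_tq_enqueue(void *context)
{
        wakeup(mydrv_tq);
}

static void
mydrv_attach(void)
{
        mydrv_tq = taskqueue_create("mydrv", M_INTWAIT,
                                    mydrv_tq_enqueue, NULL);
}

static void
mydrv_detach(void)
{
        taskqueue_free(mydrv_tq);       /* drains and releases the queue */
}
#endif
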
struct taskqueue *
taskqueue_find(const char *name)
{
        struct taskqueue *queue;

        crit_enter();
        STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
                if (!strcmp(queue->tq_name, name)) {
                        crit_exit();
                        return queue;
                }
        }
        crit_exit();
        return NULL;
}

/*
 * NOTE!  If using the per-cpu taskqueues ``taskqueue_thread[mycpuid]'',
 * be sure NOT TO SHARE the ``task'' between CPUs.  TASKS ARE NOT LOCKED.
 * So either use a throwaway task which will only be enqueued once, or
 * use one task per CPU!
 */
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
        struct task *ins;
        struct task *prev;

        crit_enter();

        /*
         * Don't allow new tasks on a queue which is being freed.
         */
        if (queue->tq_draining) {
                crit_exit();
                return EPIPE;
        }

        /*
         * Count multiple enqueues.
         */
        if (task->ta_pending) {
                task->ta_pending++;
                crit_exit();
                return 0;
        }

        /*
         * Optimise the case when all tasks have the same priority.
         */
        prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
        if (!prev || prev->ta_priority >= task->ta_priority) {
                STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
        } else {
                prev = NULL;
                for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
                     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
                        if (ins->ta_priority < task->ta_priority)
                                break;

                if (prev)
                        STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
                else
                        STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
        }

        task->ta_pending = 1;
        if (queue->tq_enqueue)
                queue->tq_enqueue(queue->tq_context);

        crit_exit();

        return 0;
}

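/*
 * Illustrative sketch (not part of the original file): deferring work onto
 * the current CPU's per-cpu taskqueue while observing the NOTE above (one
 * task per CPU; tasks are not locked).  ``example_softc'',
 * ``example_task_func'', ``example_attach'' and ``example_defer'' are
 * hypothetical names; TASK_INIT(), taskqueue_enqueue() and
 * taskqueue_thread[] are the real API used here.
 */
#if 0
struct example_softc {
        struct task     sc_task[MAXCPU];        /* one task per CPU, never shared */
};

static void
example_task_func(void *context, int pending)
{
        struct example_softc *sc = context;

        /* ... perform the deferred work; ``pending'' counts coalesced enqueues ... */
}

static void
example_attach(struct example_softc *sc)
{
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++)
                TASK_INIT(&sc->sc_task[cpu], 0, example_task_func, sc);
}

static void
example_defer(struct example_softc *sc)
{
        taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->sc_task[mycpuid]);
}
#endif
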
void
taskqueue_run(struct taskqueue *queue)
{
        struct task *task;
        int pending;

        crit_enter();
        while (STAILQ_FIRST(&queue->tq_queue)) {
                /*
                 * Carefully remove the first task from the queue and
                 * zero its pending count.
                 */
                task = STAILQ_FIRST(&queue->tq_queue);
                STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
                pending = task->ta_pending;
                task->ta_pending = 0;
                crit_exit();

                task->ta_func(task->ta_context, pending);

                crit_enter();
        }
        crit_exit();
}

static void
taskqueue_swi_enqueue(void *context)
{
        setsofttq();
}

static void
taskqueue_swi_run(void *arg, void *frame)
{
        taskqueue_run(taskqueue_swi);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
         register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL));

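/*
 * Illustrative sketch (not part of the original file): queueing a task onto
 * the predefined ``taskqueue_swi'' queue, whose tasks are run from the
 * swi_taskq software-interrupt thread registered above.  The names
 * ``example_swi_task'', ``example_swi_func'', ``example_swi_init'' and
 * ``example_swi_kick'' are hypothetical.
 */
#if 0
static struct task example_swi_task;

static void
example_swi_func(void *context, int pending)
{
        /* ... runs later from the swi_taskq thread ... */
}

static void
example_swi_init(void)
{
        TASK_INIT(&example_swi_task, 0, example_swi_func, NULL);
}

static void
example_swi_kick(void)
{
        taskqueue_enqueue(taskqueue_swi, &example_swi_task);
}
#endif
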
static void
taskqueue_kthread(void *arg)
{
        for (;;) {
                taskqueue_run(taskqueue_thread[mycpuid]);
                crit_enter();
                if (STAILQ_EMPTY(&taskqueue_thread[mycpuid]->tq_queue))
                        tsleep(taskqueue_thread[mycpuid], 0, "tqthr", 0);
                crit_exit();
        }
}

static void
taskqueue_thread_enqueue(void *context)
{
        wakeup(taskqueue_thread[mycpuid]);
}

struct taskqueue *taskqueue_thread[MAXCPU];
static struct thread *taskqueue_thread_td[MAXCPU];

static void
taskqueue_init(void)
{
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
                taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
                        taskqueue_thread_enqueue, NULL);
                lwkt_create(taskqueue_kthread, NULL,
                            &taskqueue_thread_td[cpu], NULL,
                            0, cpu, "taskqueue %d", cpu);
        }
}

SYSINIT(taskqueueinit, SI_SUB_CONFIGURE, SI_ORDER_SECOND, taskqueue_init, NULL);