/*
 * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
 * Copyright (c) 2020 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/workqueue.h>

#include <sys/kthread.h>
/*
 * Running behaviour, from the kernel.org documentation:
 *
 * - While there are work items on the workqueue, the worker executes the
 *   functions associated with the work items one after the other.
 * - When there is no work item left on the workqueue, the worker becomes
 *   idle.
 *
 * There are two worker-pools for each possible CPU, one for normal work
 * items and the other for high-priority ones, plus some extra worker-pools
 * to serve work items queued on unbound workqueues; the number of these
 * backing pools is dynamic.
 */
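/*
 * For reference, typical consumer-side usage of this API is sketched below.
 * This is an illustrative, non-compiled example; "foo_softc", "foo_reset_fn"
 * and "sc" are hypothetical names, not part of this file:
 *
 *	struct foo_softc {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void
 *	foo_reset_fn(struct work_struct *w)
 *	{
 *		struct foo_softc *sc =
 *			container_of(w, struct foo_softc, reset_work);
 *		// perform the deferred hardware reset here
 *	}
 *
 *	INIT_WORK(&sc->reset_work, foo_reset_fn);
 *	queue_work(system_wq, &sc->reset_work);
 */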
/* XXX: Linux functions often enable/disable irqs on the CPU they run on;
 * this should be investigated */
struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_power_efficient_wq;
/*
 * Linux now uses these worker pools:
 * - (per cpu) regular
 * - (per cpu) regular high priority
 * - ordered
 * - ordered high priority
 * - unbound
 * - unbound high priority
 */
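/*
 * This implementation is simpler: WQ_HIGHPRI only raises the LWKT thread
 * priority and WQ_UNBOUND only collapses the queue to a single worker
 * (see _create_workqueue_common() below). A hedged sketch of creating both
 * flavours ("foo_hi" and "foo_unbound" are made-up names):
 *
 *	struct workqueue_struct *hi_wq, *unbound_wq;
 *
 *	hi_wq = alloc_workqueue("foo_hi", WQ_HIGHPRI, 1);
 *	unbound_wq = alloc_workqueue("foo_unbound", WQ_UNBOUND, 1);
 */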
static void
process_all_work(struct workqueue_worker *worker)
{
	struct work_struct *work;
	bool didcan;

	while (STAILQ_FIRST(&worker->ws_list_head)) {
		work = STAILQ_FIRST(&worker->ws_list_head);
		STAILQ_REMOVE_HEAD(&worker->ws_list_head, ws_entries);
		work->on_queue = false;

		/* A work shouldn't be executed concurrently on a single cpu */
		if (work->running)
			continue;

		/* Do not run canceled works */
		if (work->canceled) {
			/* XXX: should we allow canceled works to be reenabled ? */
			work->canceled = false;
			continue;
		}

		/* Drop the queue lock while the handler runs, so that other
		 * cpus can queue or cancel work in the meantime */
		work->running = true;
		lockmgr(&worker->worker_lock, LK_RELEASE);
		work->func(work);
		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		if (work->on_queue == false)
			work->worker = NULL;
		didcan = work->canceled;
		work->canceled = false;
		work->running = false;

		/* Wake up a thread waiting in _cancel_work() */
		if (didcan)
			wakeup(work);
	}
}
static void
wq_worker_thread(void *arg)
{
	struct workqueue_worker *worker = arg;

	lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
	while (1) {
		process_all_work(worker);
		/* lksleep() atomically drops worker_lock while we wait for
		 * new work and re-acquires it on wakeup */
		lksleep(worker, &worker->worker_lock, 0, "wqidle", 0);
	}
	lockmgr(&worker->worker_lock, LK_RELEASE);
}
/*
 * Return false if work was already on a queue
 * Return true and queue it if this was not the case
 */
int
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_worker *worker;
	int ret = false;

	/* XXX: should we block instead ? */
	if (wq->is_draining)
		return false;

	if (wq->num_workers > 1)
		worker = &(*wq->workers)[mycpuid];
	else
		worker = &(*wq->workers)[0];

	lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
	work->canceled = false;
	if (work->on_queue == false || work->running == false) {
		if (work->on_queue == false) {
			STAILQ_INSERT_TAIL(&worker->ws_list_head, work,
					   ws_entries);
			work->on_queue = true;
			work->worker = worker;
			/* Wake the idle worker thread */
			wakeup_one(worker);
		}
		ret = true;
	}
	lockmgr(&worker->worker_lock, LK_RELEASE);

	return ret;
}
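/*
 * The boolean contract above allows "schedule at most once" logic in
 * callers. Illustrative sketch only ("sc" is hypothetical):
 *
 *	if (!queue_work(system_wq, &sc->reset_work)) {
 *		// already queued: nothing to do
 *	}
 */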
static void
_delayed_work_fn(void *arg)
{
	struct delayed_work *dw = arg;

	/* XXX: the wq argument passed to queue_delayed_work() is ignored;
	 * everything lands on system_wq */
	queue_work(system_wq, &dw->work);
}
int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending = work->work.on_queue;	// XXX: running too ?

	if (delay != 0)
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
	else
		_delayed_work_fn((void *)work);

	return (!pending);
}
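/*
 * The delay is passed directly in ticks to callout_reset(). A hedged usage
 * sketch, assuming the usual Linux-compat helpers INIT_DELAYED_WORK() and
 * msecs_to_jiffies() are available ("poll_work" and "foo_poll_fn" are
 * hypothetical):
 *
 *	struct delayed_work poll_work;
 *
 *	INIT_DELAYED_WORK(&poll_work, foo_poll_fn);
 *	queue_delayed_work(system_wq, &poll_work, msecs_to_jiffies(100));
 */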
static int
init_workqueues(void *arg)
{
	system_wq = alloc_workqueue("system_wq", 0, 1);
	system_highpri_wq = alloc_workqueue("system_highpri_wq", WQ_HIGHPRI, 1);
	system_long_wq = alloc_workqueue("system_long_wq", 0, 1);
	system_unbound_wq = alloc_workqueue("system_unbound_wq", WQ_UNBOUND, 1);
	system_power_efficient_wq = alloc_workqueue("system_power_efficient_wq", 0, 1);

	return 0;
}
static int destroy_workqueues(void *arg)
{
	destroy_workqueue(system_wq);
	destroy_workqueue(system_highpri_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_unbound_wq);
	destroy_workqueue(system_power_efficient_wq);

	return 0;
}
struct workqueue_struct *
_create_workqueue_common(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int priority, error;

	wq = kmalloc(sizeof(*wq), M_DRM, M_WAITOK | M_ZERO);

	if (flags & WQ_HIGHPRI)
		priority = TDPRI_INT_SUPPORT;
	else
		priority = TDPRI_KERN_DAEMON;

	if (flags & WQ_UNBOUND) {
		wq->num_workers = 1;
	} else {
		wq->num_workers = ncpus;
	}
	wq->workers = kmalloc(sizeof(struct workqueue_worker) * wq->num_workers,
	    M_DRM, M_WAITOK | M_ZERO);

	for (int i = 0; i < wq->num_workers; i++) {
		struct workqueue_worker *worker = &(*wq->workers)[i];

		lockinit(&worker->worker_lock, "lwq", 0, 0);
		STAILQ_INIT(&worker->ws_list_head);
		if (wq->num_workers > 1) {
			/* One worker bound to each cpu */
			error = lwkt_create(wq_worker_thread, worker,
			    &worker->worker_thread, NULL, TDF_NOSTART, i,
			    "%s/%d", name, i);
		} else {
			/* A single worker, not bound to any particular cpu */
			error = lwkt_create(wq_worker_thread, worker,
			    &worker->worker_thread, NULL, TDF_NOSTART, -1,
			    name);
		}
		if (error) {
			kprintf("%s: lwkt_create(%s/%d): error %d\n",
			    __func__, name, i, error);
			/* XXX: destroy kernel threads and free workers[] if applicable */
			return NULL;
		}
		lwkt_setpri_initial(worker->worker_thread, priority);
		lwkt_schedule(worker->worker_thread);
	}

	return wq;
}
void
destroy_workqueue(struct workqueue_struct *wq)
{
	drain_workqueue(wq);
//	wq->is_draining = true;

	/* XXX: the worker threads are not terminated here */
	kfree(wq->workers);
	kfree(wq);
}
SYSINIT(linux_workqueue_init, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, init_workqueues, NULL);
SYSUNINIT(linux_workqueue_destroy, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, destroy_workqueues, NULL);
bool
flush_delayed_work(struct delayed_work *dwork)
{
	/* Wait for a pending timer to stop, then flush the work itself */
	callout_drain(&dwork->timer);
	return flush_work(&dwork->work);
}
/* Wait until the wq becomes empty */
void
drain_workqueue(struct workqueue_struct *wq)
{
	struct workqueue_worker *worker;

	wq->is_draining = true;

	for (int i = 0; i < wq->num_workers; i++) {
		worker = &(*wq->workers)[i];

		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		while (!STAILQ_EMPTY(&worker->ws_list_head)) {
			/* XXX: introduces latency */
			/* lksleep() drops worker_lock while sleeping so the
			 * worker thread can make progress; a plain tsleep()
			 * here would hold the lock and starve the worker */
			lksleep(&drain_workqueue, &worker->worker_lock, 0,
			    "wkdrain", 1);
		}
		lockmgr(&worker->worker_lock, LK_RELEASE);
	}

	/* XXX: No more work will be queued. is that right ? */
//	wq->is_draining = false;
}
bool
work_pending(struct work_struct *work)
{
	/* XXX: is on_queue the only constraint ? */
	return work->on_queue;
}
unsigned int
work_busy(struct work_struct *work)
{
	return (work->on_queue || work->running);
}
static void
__flush_work_func(struct work_struct *work)
{
	/* Wake up the thread sleeping in flush_workqueue() */
	wakeup(work);
}
/* XXX introduces latency ? */
void
flush_workqueue(struct workqueue_struct *wq)
{
	struct work_struct __flush_work;

	/* Queue a dummy work item and wait until it has been processed;
	 * everything queued before it will have run by then */
	INIT_WORK(&__flush_work, __flush_work_func);
	queue_work(wq, &__flush_work);

	while (__flush_work.on_queue || __flush_work.running) {
		tsleep(&__flush_work, 0, "flshwq", 0);
	}
}
/*
 * Wait until a work is done (has been executed)
 * Return true if this function had to wait, and false otherwise
 */
bool
flush_work(struct work_struct *work)
{
	bool ret = false;

	/* XXX: probably unreliable */
	while (work->on_queue || work->running) {
		ret = true;
		/* XXX: use something more intelligent than tsleep() */
		tsleep(&flush_work, 0, "flshwrk", 1);
	}

	return ret;
}
static bool
_cancel_work(struct work_struct *work, bool sync_wait)
{
	struct workqueue_worker *worker;
	bool ret = false;

	for (;;) {
		if (work->on_queue) {
			worker = work->worker;
			lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
			/* Re-check under the lock: we may have raced with
			 * the worker thread */
			if (worker != work->worker || work->on_queue == false) {
				lockmgr(&worker->worker_lock, LK_RELEASE);
				continue;
			}
			STAILQ_REMOVE(&worker->ws_list_head, work,
				      work_struct, ws_entries);
			work->on_queue = false;
			ret = true;
			lockmgr(&worker->worker_lock, LK_RELEASE);
		}
		if (work->running == false)
			break;

		worker = work->worker;
		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		if (worker != work->worker || work->running == false) {
			lockmgr(&worker->worker_lock, LK_RELEASE);
			continue;
		}
		work->canceled = true;
		ret = true;
		if (sync_wait == false) {
			lockmgr(&worker->worker_lock, LK_RELEASE);
			break;
		}
		/* The worker wakes us once the canceled work has been dealt
		 * with; then re-check from the top */
		lksleep(work, &worker->worker_lock, 0, "wqcan", 1);
		lockmgr(&worker->worker_lock, LK_RELEASE);
	}

	return ret;
}
/*
 * If work was queued, remove it from the queue and return true.
 * If work was not queued, return false.
 * In any case, wait for work to complete or be removed from the workqueue,
 * callers may free associated data structures after this call.
 */
bool
cancel_work_sync(struct work_struct *work)
{
	return _cancel_work(work, true);
}
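/*
 * Typical teardown pattern: after cancel_work_sync() the handler is neither
 * queued nor running, so the embedding structure may be freed. Illustrative
 * sketch only ("sc" is hypothetical and freed with the Linux-style kfree()):
 *
 *	cancel_work_sync(&sc->reset_work);
 *	kfree(sc);
 */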
/* Return false if work wasn't pending
 * Return true if work was pending and canceled */
bool
cancel_delayed_work(struct delayed_work *dwork)
{
	struct work_struct *work = &dwork->work;

	work->canceled = true;
	callout_cancel(&dwork->timer);

	return _cancel_work(work, false);
}
bool
cancel_delayed_work_sync(struct delayed_work *dwork)
{
	struct work_struct *work = &dwork->work;

	work->canceled = true;
	callout_cancel(&dwork->timer);

	return _cancel_work(work, true);
}
bool
delayed_work_pending(struct delayed_work *dw)
{
	/* XXX: possibly wrong if the timer hasn't yet fired */
	return work_pending(&dw->work);
}
void
destroy_work_on_stack(struct work_struct *work)
{
}

void
destroy_delayed_work_on_stack(struct delayed_work *work)
{
}