/* sys/dev/drm/linux_workqueue.c */
/*
 * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
 * Copyright (c) 2020 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <drm/drmP.h>
#include <linux/workqueue.h>

#include <sys/kthread.h>
/*
   Running behaviour, from kernel.org docs:
   - While there are work items on the workqueue the worker executes the
     functions associated with the work items one after the other.
   - When there is no work item left on the workqueue the worker becomes idle.

   There are two worker-pools, one for normal work items and the other for
   high priority ones, for each possible CPU, and some extra worker-pools to
   serve work items queued on unbound workqueues - the number of these
   backing pools is dynamic.
*/
/* XXX: Linux functions often enable/disable irqs on the CPU they run on;
 * this should be investigated */

struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_power_efficient_wq;
/*
 * Linux now uses these worker pools:
 * - (per cpu) regular
 * - (per cpu) regular high priority
 * - ordered
 * - ordered high priority
 * - unbound
 * - unbound high priority
 */
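
/*
 * This port is simpler: a bound workqueue gets one worker (thread, lock and
 * work list) per CPU, an unbound workqueue gets a single worker, and
 * WQ_HIGHPRI only raises the priority of the worker threads.
 *
 * process_all_work() below is called with the worker lock held; it releases
 * the lock around each work callback and re-acquires it afterwards.
 */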
static inline void
process_all_work(struct workqueue_worker *worker)
{
	struct work_struct *work;
	bool didcan;

	while (STAILQ_FIRST(&worker->ws_list_head)) {
		work = STAILQ_FIRST(&worker->ws_list_head);
		STAILQ_REMOVE_HEAD(&worker->ws_list_head, ws_entries);
		work->on_queue = false;

		/* A work shouldn't be executed concurrently on a single cpu */
		if (work->running)
			continue;

		/* Do not run canceled works */
		if (work->canceled) {
			/* XXX: should we allow canceled works to be reenabled ? */
			work->canceled = false;
			continue;
		}

		work->running = true;
		lockmgr(&worker->worker_lock, LK_RELEASE);
		work->func(work);
		lwkt_yield();
		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		if (work->on_queue == false)
			work->worker = NULL;
		didcan = work->canceled;
		cpu_sfence();
		work->running = false;
		if (didcan == true)
			wakeup(work);
	}
}
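
/*
 * Main loop of a worker thread: run every queued work item, then sleep on
 * the worker until queue_work() wakes it up again.
 */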
static void
wq_worker_thread(void *arg)
{
	struct workqueue_worker *worker = arg;

	lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
	while (1) {
		process_all_work(worker);
		lksleep(worker, &worker->worker_lock, 0, "wqidle", 0);
	}
	lockmgr(&worker->worker_lock, LK_RELEASE);
}
/*
 * Return false if work was already on a queue
 * Return true and queue it if this was not the case
 */
int
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_worker *worker;
	int ret = false;

	/* XXX: should we block instead ? */
	if (wq->is_draining)
		return false;

	if (wq->num_workers > 1)
		worker = &(*wq->workers)[mycpuid];
	else
		worker = &(*wq->workers)[0];

	lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
	work->canceled = false;
	if (work->on_queue == false || work->running == false) {
		if (work->on_queue == false) {
			STAILQ_INSERT_TAIL(&worker->ws_list_head, work,
					   ws_entries);
			work->on_queue = true;
			work->worker = worker;
			wakeup_one(worker);
		}
		ret = true;
	}
	lockmgr(&worker->worker_lock, LK_RELEASE);

	return ret;
}
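
/*
 * Callout function for delayed works: once the timer has expired, queue the
 * embedded work_struct.  Note that it is always queued on system_wq,
 * regardless of the workqueue originally passed to queue_delayed_work().
 */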
static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *dw = arg;

	queue_work(system_wq, &dw->work);
}
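
/*
 * Schedule dwork to run after 'delay' ticks, or immediately if delay is 0.
 * Returns true if the work was not already pending.
 */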
int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
		   unsigned long delay)
{
	int pending = work->work.on_queue;	// XXX: running too ?

	if (delay != 0) {
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
	} else {
		_delayed_work_fn((void *)work);
	}

	return (!pending);
}
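
/*
 * The system-wide workqueues are created and destroyed together with the
 * driver subsystem via the SYSINIT/SYSUNINIT hooks at the bottom of this
 * file.
 */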
static int
init_workqueues(void *arg)
{
	system_wq = alloc_workqueue("system_wq", 0, 1);
	system_highpri_wq = alloc_workqueue("system_highpri_wq", WQ_HIGHPRI, 1);
	system_long_wq = alloc_workqueue("system_long_wq", 0, 1);
	system_unbound_wq = alloc_workqueue("system_unbound_wq", WQ_UNBOUND, 1);
	system_power_efficient_wq = alloc_workqueue("system_power_efficient_wq", 0, 1);

	return 0;
}
static int
destroy_workqueues(void *arg)
{
	destroy_workqueue(system_wq);
	destroy_workqueue(system_highpri_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_unbound_wq);
	destroy_workqueue(system_power_efficient_wq);

	return 0;
}
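
/*
 * Common creation code, presumably reached through the alloc_workqueue()
 * wrapper: allocate the workqueue and its workers (one per CPU, or a single
 * one for WQ_UNBOUND queues) and start one LWKT thread per worker.
 */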
struct workqueue_struct *
_create_workqueue_common(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int priority, error;

	wq = kmalloc(sizeof(*wq), M_DRM, M_WAITOK | M_ZERO);

	if (flags & WQ_HIGHPRI)
		priority = TDPRI_INT_SUPPORT;
	else
		priority = TDPRI_KERN_DAEMON;

	if (flags & WQ_UNBOUND) {
		wq->num_workers = 1;
	} else {
		wq->num_workers = ncpus;
	}

	wq->workers = kmalloc(sizeof(struct workqueue_worker) * wq->num_workers,
			      M_DRM, M_WAITOK | M_ZERO);

	for (int i = 0; i < wq->num_workers; i++) {
		struct workqueue_worker *worker = &(*wq->workers)[i];

		lockinit(&worker->worker_lock, "lwq", 0, 0);
		STAILQ_INIT(&worker->ws_list_head);
		if (wq->num_workers > 1) {
			error = lwkt_create(wq_worker_thread, worker,
			    &worker->worker_thread, NULL, TDF_NOSTART, i,
			    "%s/%d", name, i);
		} else {
			error = lwkt_create(wq_worker_thread, worker,
			    &worker->worker_thread, NULL, TDF_NOSTART, -1,
			    name);
		}
		if (error) {
			kprintf("%s: lwkt_create(%s/%d): error %d\n",
			    __func__, name, i, error);
			/* XXX: destroy kernel threads and free workers[] if applicable */
			kfree(wq);
			return NULL;
		}
		lwkt_setpri_initial(worker->worker_thread, priority);
		lwkt_schedule(worker->worker_thread);
	}

	return wq;
}
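
/*
 * Tear a workqueue down.  Only the draining part is implemented so far;
 * stopping the worker threads and freeing the memory is still a TODO.
 */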
void
destroy_workqueue(struct workqueue_struct *wq)
{
	drain_workqueue(wq);
//	wq->is_draining = true;
#if 0	/* XXX TODO */
	kill_all_threads;
	kfree(wq->wq_threads);
	kfree(wq);
#endif
}
SYSINIT(linux_workqueue_init, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, init_workqueues, NULL);
SYSUNINIT(linux_workqueue_destroy, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, destroy_workqueues, NULL);
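
/*
 * Flush a delayed work: stop and drain its timer, then flush the underlying
 * work_struct.
 */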
bool
flush_delayed_work(struct delayed_work *dwork)
{
	callout_drain(&dwork->timer);
	return flush_work(&dwork->work);
}
/* Wait until the wq becomes empty */
void
drain_workqueue(struct workqueue_struct *wq)
{
	struct workqueue_worker *worker;

	wq->is_draining = true;

	for (int i = 0; i < wq->num_workers; i++) {
		worker = &(*wq->workers)[i];

		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		while (!STAILQ_EMPTY(&worker->ws_list_head)) {
			/* XXX: introduces latency */
			tsleep(&drain_workqueue, 0, "wkdrain", 1);
		}
		lockmgr(&worker->worker_lock, LK_RELEASE);
	}

	/* XXX: No more work will be queued. is that right ? */
//	wq->is_draining = false;
}
bool
work_pending(struct work_struct *work)
{
	/* XXX: is on_queue the only constraint ? */
	return work->on_queue;
}
unsigned int
work_busy(struct work_struct *work)
{
	return (work->on_queue || work->running);
}
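
/*
 * flush_workqueue() works by queueing a dummy work item and sleeping until
 * it has been executed; __flush_work_func() is the dummy callback waking
 * the sleeper up.
 */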
static inline void
__flush_work_func(struct work_struct *work)
{
	wakeup_one(work);
}

/* XXX introduces latency ? */
void
flush_workqueue(struct workqueue_struct *wq)
{
	struct work_struct __flush_work;

	INIT_WORK(&__flush_work, __flush_work_func);

	queue_work(wq, &__flush_work);
	while (__flush_work.on_queue || __flush_work.running) {
		tsleep(&__flush_work, 0, "flshwq", 0);
	}
}
/*
 * Wait until a work is done (has been executed)
 * Return true if this function had to wait, and false otherwise
 */
bool
flush_work(struct work_struct *work)
{
	int ret = false;

	/* XXX: probably unreliable */
	while (work->on_queue || work->running) {
		ret = true;
		/* XXX: use something more intelligent than tsleep() */
		tsleep(&flush_work, 0, "flshwrk", 1);
	}

	return ret;
}
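
/*
 * Shared cancellation logic: dequeue the work if it is still sitting on a
 * worker list, then, if its callback is currently running, mark it canceled.
 * With sync_wait the caller also sleeps until the running callback has
 * finished (process_all_work() issues the corresponding wakeup).
 */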
static inline bool
_cancel_work(struct work_struct *work, bool sync_wait)
{
	struct workqueue_worker *worker;
	bool ret;

	ret = false;

	for (;;) {
		if (work->on_queue) {
			worker = work->worker;
			if (worker == NULL)
				continue;
			lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
			if (worker != work->worker || work->on_queue == false) {
				lockmgr(&worker->worker_lock, LK_RELEASE);
				continue;
			}
			STAILQ_REMOVE(&worker->ws_list_head, work,
				      work_struct, ws_entries);
			work->on_queue = false;
			ret = true;
			lockmgr(&worker->worker_lock, LK_RELEASE);
		}
		if (work->running == false)
			break;

		worker = work->worker;
		if (worker == NULL)
			continue;
		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		if (worker != work->worker || work->running == false) {
			lockmgr(&worker->worker_lock, LK_RELEASE);
			continue;
		}
		work->canceled = true;
		ret = true;
		if (sync_wait == false) {
			lockmgr(&worker->worker_lock, LK_RELEASE);
			break;
		}
		/* XXX this races */
		lksleep(work, &worker->worker_lock, 0, "wqcan", 1);
		lockmgr(&worker->worker_lock, LK_RELEASE);
		/* retest */
	}

	return ret;
}
/*
 * If work was queued, remove it from the queue and return true.
 * If work was not queued, return false.
 * In any case, wait for work to complete or be removed from the workqueue,
 * callers may free associated data structures after this call.
 */
bool
cancel_work_sync(struct work_struct *work)
{
	return _cancel_work(work, true);
}
/* Return false if work wasn't pending
 * Return true if work was pending and canceled */
bool
cancel_delayed_work(struct delayed_work *dwork)
{
	struct work_struct *work = &dwork->work;

	work->canceled = true;
	callout_cancel(&dwork->timer);

	return _cancel_work(work, false);
}
bool
cancel_delayed_work_sync(struct delayed_work *dwork)
{
	struct work_struct *work = &dwork->work;

	work->canceled = true;
	callout_cancel(&dwork->timer);

	return _cancel_work(work, true);
}
bool
delayed_work_pending(struct delayed_work *dw)
{
	/* XXX: possibly wrong if the timer hasn't yet fired */
	return work_pending(&dw->work);
}
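
/*
 * These are no-ops: this implementation keeps no external state for
 * on-stack work items, so there is nothing to destroy.
 */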
void
destroy_work_on_stack(struct work_struct *work)
{
}

void
destroy_delayed_work_on_stack(struct delayed_work *work)
{
}