kernel/slow-work.c
/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * See Documentation/slow-work.txt
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
                                         * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)  /* can't start new threads for 5s after
                                         * OOM */

static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);

#ifdef CONFIG_SYSCTL
static int slow_work_min_threads_sysctl(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);

static int slow_work_max_threads_sysctl(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#endif

/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
                                             * very slow work */

#ifdef CONFIG_SYSCTL
static const int slow_work_min_min_threads = 2;
static int slow_work_max_max_threads = 255;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;

ctl_table slow_work_sysctls[] = {
        {
                .ctl_name = CTL_UNNUMBERED,
                .procname = "min-threads",
                .data = &slow_work_min_threads,
                .maxlen = sizeof(unsigned),
                .mode = 0644,
                .proc_handler = slow_work_min_threads_sysctl,
                .extra1 = (void *) &slow_work_min_min_threads,
                .extra2 = &slow_work_max_threads,
        },
        {
                .ctl_name = CTL_UNNUMBERED,
                .procname = "max-threads",
                .data = &slow_work_max_threads,
                .maxlen = sizeof(unsigned),
                .mode = 0644,
                .proc_handler = slow_work_max_threads_sysctl,
                .extra1 = &slow_work_min_threads,
                .extra2 = (void *) &slow_work_max_max_threads,
        },
        {
                .ctl_name = CTL_UNNUMBERED,
                .procname = "vslow-percentage",
                .data = &vslow_work_proportion,
                .maxlen = sizeof(unsigned),
                .mode = 0644,
                .proc_handler = &proc_dointvec_minmax,
                .extra1 = (void *) &slow_work_min_vslow,
                .extra2 = (void *) &slow_work_max_vslow,
        },
        { .ctl_name = 0 }
};
#endif
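
/*
 * Note: the table above only defines the individual entries; it is assumed to
 * be hooked up under a "slow-work" directory by the core sysctl tables
 * (giving, for example, /proc/sys/kernel/slow-work/min-threads, max-threads
 * and vslow-percentage), so the pool can be retuned at runtime along the
 * lines of:
 *
 *      echo 8 > /proc/sys/kernel/slow-work/max-threads
 *      echo 25 > /proc/sys/kernel/slow-work/vslow-percentage
 *
 * The exact paths depend on where the parent table registers this one.
 */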
/*
 * The active state of the thread pool
 */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull; /* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */

/*
 * The queues of work items and the lock governing access to them. These are
 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);

/*
 * The thread controls. A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);

/*
 * The number of users of the thread pool and its lock. Whilst this is zero we
 * have no threads hanging around, and when this reaches zero, we wait for all
 * active or queued work items to complete and kill all the threads we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);

/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of the threads in the pool. This means we always have at
 * least one thread that can process slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
        unsigned vsmax;

        vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
        vsmax /= 100;
        vsmax = max(vsmax, 1U);
        return min(vsmax, slow_work_max_threads - 1);
}
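
/*
 * For example, with the default 50% proportion, four threads in the pool and
 * slow_work_max_threads at its default of 4, vsmax = max(4 * 50 / 100, 1) = 2,
 * capped at 4 - 1 = 3, so at most two threads may be occupied by very slow
 * items at any one time.
 */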
/*
 * Attempt to execute stuff queued on a slow thread. Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(void)
{
        struct slow_work *work = NULL;
        unsigned vsmax;
        bool very_slow;

        vsmax = slow_work_calc_vsmax();

        /* see if we can schedule a new thread to be started if we're not
         * keeping up with the work */
        if (!waitqueue_active(&slow_work_thread_wq) &&
            (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
            atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
            !slow_work_may_not_start_new_thread)
                slow_work_enqueue(&slow_work_new_thread);

        /* find something to execute */
        spin_lock_irq(&slow_work_queue_lock);
        if (!list_empty(&vslow_work_queue) &&
            atomic_read(&vslow_work_executing_count) < vsmax) {
                work = list_entry(vslow_work_queue.next,
                                  struct slow_work, link);
                if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
                        BUG();
                list_del_init(&work->link);
                atomic_inc(&vslow_work_executing_count);
                very_slow = true;
        } else if (!list_empty(&slow_work_queue)) {
                work = list_entry(slow_work_queue.next,
                                  struct slow_work, link);
                if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
                        BUG();
                list_del_init(&work->link);
                very_slow = false;
        } else {
                very_slow = false; /* avoid the compiler warning */
        }
        spin_unlock_irq(&slow_work_queue_lock);

        if (!work)
                return false;

        if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
                BUG();

        work->ops->execute(work);

        if (very_slow)
                atomic_dec(&vslow_work_executing_count);
        clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

        /* if someone tried to enqueue the item whilst we were executing it,
         * then it'll be left unenqueued to avoid multiple threads trying to
         * execute it simultaneously
         *
         * there is, however, a race between us testing the pending flag and
         * getting the spinlock, and between the enqueuer setting the pending
         * flag and getting the spinlock, so we use a deferral bit to tell us
         * if the enqueuer got there first
         */
        if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
                spin_lock_irq(&slow_work_queue_lock);

                if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
                    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
                        goto auto_requeue;

                spin_unlock_irq(&slow_work_queue_lock);
        }

        work->ops->put_ref(work);
        return true;

auto_requeue:
        /* we must complete the enqueue operation
         * - we transfer our ref on the item back to the appropriate queue
         * - don't wake another thread up as we're awake already
         */
        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                list_add_tail(&work->link, &vslow_work_queue);
        else
                list_add_tail(&work->link, &slow_work_queue);
        spin_unlock_irq(&slow_work_queue_lock);
        return true;
}

/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing. If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations. The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention. The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute. This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
        unsigned long flags;

        BUG_ON(slow_work_user_count <= 0);
        BUG_ON(!work);
        BUG_ON(!work->ops);
        BUG_ON(!work->ops->get_ref);

        /* when honouring an enqueue request, we only promise that we will run
         * the work function in the future; we do not promise to run it once
         * per enqueue request
         *
         * we use the PENDING bit to merge together repeat requests without
         * having to disable IRQs and take the spinlock, whilst still
         * maintaining our promise
         */
        if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
                spin_lock_irqsave(&slow_work_queue_lock, flags);

                /* we promise that we will not attempt to execute the work
                 * function in more than one thread simultaneously
                 *
                 * this, however, leaves us with a problem if we're asked to
                 * enqueue the work whilst someone is executing the work
                 * function as simply queueing the work immediately means that
                 * another thread may try executing it whilst it is already
                 * under execution
                 *
                 * to deal with this, we set the ENQ_DEFERRED bit instead of
                 * enqueueing, and the thread currently executing the work
                 * function will enqueue the work item when the work function
                 * returns and it has cleared the EXECUTING bit
                 */
                if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
                        set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
                } else {
                        if (work->ops->get_ref(work) < 0)
                                goto cant_get_ref;
                        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                                list_add_tail(&work->link, &vslow_work_queue);
                        else
                                list_add_tail(&work->link, &slow_work_queue);
                        wake_up(&slow_work_thread_wq);
                }

                spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        }
        return 0;

cant_get_ref:
        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);
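
/*
 * Illustrative sketch only (not part of this file): a user of the facility
 * typically embeds a struct slow_work in its own object, supplies
 * get_ref/put_ref/execute operations and enqueues the item whenever there is
 * work to do. The "myobj" names below are hypothetical:
 *
 *      struct myobj {
 *              struct kref             ref;
 *              struct slow_work        work;
 *      };
 *
 *      static int myobj_get_ref(struct slow_work *work)
 *      {
 *              kref_get(&container_of(work, struct myobj, work)->ref);
 *              return 0;
 *      }
 *
 *      static void myobj_put_ref(struct slow_work *work)
 *      {
 *              kref_put(&container_of(work, struct myobj, work)->ref,
 *                       myobj_release);
 *      }
 *
 *      static void myobj_execute(struct slow_work *work)
 *      {
 *              ... do the lengthy operation; the thread may sleep here ...
 *      }
 *
 *      static const struct slow_work_ops myobj_slow_work_ops = {
 *              .get_ref        = myobj_get_ref,
 *              .put_ref        = myobj_put_ref,
 *              .execute        = myobj_execute,
 *      };
 *
 *      slow_work_init(&obj->work, &myobj_slow_work_ops);
 *      slow_work_enqueue(&obj->work);
 *
 * myobj_release() is a hypothetical kref release function provided by the
 * caller.
 */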
/*
 * Schedule a cull of the thread pool at some time in the near future
 */
static void slow_work_schedule_cull(void)
{
        mod_timer(&slow_work_cull_timer,
                  round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
}

/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
        unsigned long flags;
        bool do_cull = false;

        spin_lock_irqsave(&slow_work_queue_lock, flags);

        if (slow_work_cull) {
                slow_work_cull = false;

                if (list_empty(&slow_work_queue) &&
                    list_empty(&vslow_work_queue) &&
                    atomic_read(&slow_work_thread_count) >
                    slow_work_min_threads) {
                        slow_work_schedule_cull();
                        do_cull = true;
                }
        }

        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        return do_cull;
}

/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
        return !list_empty(&slow_work_queue) ||
                (!list_empty(&vslow_work_queue) &&
                 atomic_read(&vslow_work_executing_count) < vsmax);
}

/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
        int vsmax;

        DEFINE_WAIT(wait);

        set_freezable();
        set_user_nice(current, -5);

        for (;;) {
                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;

                prepare_to_wait_exclusive(&slow_work_thread_wq, &wait,
                                          TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !slow_work_threads_should_exit &&
                    !slow_work_available(vsmax) &&
                    !slow_work_cull)
                        schedule();
                finish_wait(&slow_work_thread_wq, &wait);

                try_to_freeze();

                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;

                if (slow_work_available(vsmax) && slow_work_execute()) {
                        cond_resched();
                        if (list_empty(&slow_work_queue) &&
                            list_empty(&vslow_work_queue) &&
                            atomic_read(&slow_work_thread_count) >
                            slow_work_min_threads)
                                slow_work_schedule_cull();
                        continue;
                }

                if (slow_work_threads_should_exit)
                        break;

                if (slow_work_cull && slow_work_cull_thread())
                        break;
        }

        if (atomic_dec_and_test(&slow_work_thread_count))
                complete_and_exit(&slow_work_last_thread_exited, 0);
        return 0;
}

/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
        slow_work_cull = true;
        wake_up(&slow_work_thread_wq);
}

/*
 * Get a reference on slow work thread starter
 */
static int slow_work_new_thread_get_ref(struct slow_work *work)
{
        return 0;
}

/*
 * Drop a reference on slow work thread starter
 */
static void slow_work_new_thread_put_ref(struct slow_work *work)
{
}

/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
        struct task_struct *p;

        if (slow_work_threads_should_exit)
                return;

        if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
                return;

        if (!mutex_trylock(&slow_work_user_lock))
                return;

        slow_work_may_not_start_new_thread = true;
        atomic_inc(&slow_work_thread_count);
        p = kthread_run(slow_work_thread, NULL, "kslowd");
        if (IS_ERR(p)) {
                printk(KERN_DEBUG "Slow work thread pool: OOM\n");
                if (atomic_dec_and_test(&slow_work_thread_count))
                        BUG(); /* we're running on a slow work thread... */
                mod_timer(&slow_work_oom_timer,
                          round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
        } else {
                /* ratelimit the starting of new threads */
                mod_timer(&slow_work_oom_timer, jiffies + 1);
        }

        mutex_unlock(&slow_work_user_lock);
}

static const struct slow_work_ops slow_work_new_thread_ops = {
        .get_ref = slow_work_new_thread_get_ref,
        .put_ref = slow_work_new_thread_put_ref,
        .execute = slow_work_new_thread_execute,
};

/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
        slow_work_may_not_start_new_thread = false;
}

#ifdef CONFIG_SYSCTL
/*
 * Handle adjustment of the minimum number of threads
 */
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
                                        void __user *buffer,
                                        size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int n;

        if (ret == 0) {
                mutex_lock(&slow_work_user_lock);
                if (slow_work_user_count > 0) {
                        /* see if we need to start or stop threads */
                        n = atomic_read(&slow_work_thread_count) -
                                slow_work_min_threads;

                        if (n < 0 && !slow_work_may_not_start_new_thread)
                                slow_work_enqueue(&slow_work_new_thread);
                        else if (n > 0)
                                slow_work_schedule_cull();
                }
                mutex_unlock(&slow_work_user_lock);
        }

        return ret;
}

/*
 * Handle adjustment of the maximum number of threads
 */
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
                                        void __user *buffer,
                                        size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int n;

        if (ret == 0) {
                mutex_lock(&slow_work_user_lock);
                if (slow_work_user_count > 0) {
                        /* see if we need to stop threads */
                        n = slow_work_max_threads -
                                atomic_read(&slow_work_thread_count);

                        if (n < 0)
                                slow_work_schedule_cull();
                }
                mutex_unlock(&slow_work_user_lock);
        }

        return ret;
}
#endif /* CONFIG_SYSCTL */

/**
 * slow_work_register_user - Register a user of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point. This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(void)
{
        struct task_struct *p;
        int loop;

        mutex_lock(&slow_work_user_lock);

        if (slow_work_user_count == 0) {
                printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
                init_completion(&slow_work_last_thread_exited);

                slow_work_threads_should_exit = false;
                slow_work_init(&slow_work_new_thread,
                               &slow_work_new_thread_ops);
                slow_work_may_not_start_new_thread = false;
                slow_work_cull = false;

                /* start the minimum number of threads */
                for (loop = 0; loop < slow_work_min_threads; loop++) {
                        atomic_inc(&slow_work_thread_count);
                        p = kthread_run(slow_work_thread, NULL, "kslowd");
                        if (IS_ERR(p))
                                goto error;
                }
                printk(KERN_NOTICE "Slow work thread pool: Ready\n");
        }

        slow_work_user_count++;
        mutex_unlock(&slow_work_user_lock);
        return 0;

error:
        if (atomic_dec_and_test(&slow_work_thread_count))
                complete(&slow_work_last_thread_exited);
        if (loop > 0) {
                printk(KERN_ERR "Slow work thread pool:"
                       " Aborting startup on ENOMEM\n");
                slow_work_threads_should_exit = true;
                wake_up_all(&slow_work_thread_wq);
                wait_for_completion(&slow_work_last_thread_exited);
                printk(KERN_ERR "Slow work thread pool: Aborted\n");
        }
        mutex_unlock(&slow_work_user_lock);
        return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);

/**
 * slow_work_unregister_user - Unregister a user of the facility
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 */
void slow_work_unregister_user(void)
{
        mutex_lock(&slow_work_user_lock);

        BUG_ON(slow_work_user_count <= 0);

        slow_work_user_count--;
        if (slow_work_user_count == 0) {
                printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
                slow_work_threads_should_exit = true;
                del_timer_sync(&slow_work_cull_timer);
                del_timer_sync(&slow_work_oom_timer);
                wake_up_all(&slow_work_thread_wq);
                wait_for_completion(&slow_work_last_thread_exited);
                printk(KERN_NOTICE "Slow work thread pool:"
                       " Shut down complete\n");
        }

        mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);
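
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this file):
 * a module wanting to use the pool registers itself once, enqueues items as
 * needed, and unregisters on exit so that the threads can be torn down when
 * the last user goes away:
 *
 *      ret = slow_work_register_user();
 *      if (ret < 0)
 *              return ret;
 *
 *      slow_work_init(&obj->work, &myobj_slow_work_ops);
 *      slow_work_enqueue(&obj->work);
 *
 *      ...
 *
 *      slow_work_unregister_user();
 */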
/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
        unsigned nr_cpus = num_possible_cpus();

        if (slow_work_max_threads < nr_cpus)
                slow_work_max_threads = nr_cpus;
#ifdef CONFIG_SYSCTL
        if (slow_work_max_max_threads < nr_cpus * 2)
                slow_work_max_max_threads = nr_cpus * 2;
#endif
        return 0;
}

subsys_initcall(init_slow_work);