/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue.
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};

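/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a caller might wire an ordinary pool to a single-threaded helper pool
 * so that new workers are started from the helper rather than from a worker
 * that may itself be cleaning dirty memory.  The example_* names and the
 * pool sizes are hypothetical.
 */
static void __maybe_unused example_wire_up_helper(void)
{
	static struct btrfs_workers helper;
	static struct btrfs_workers pool;

	/* the helper pool gets no helper of its own and stays at one thread */
	btrfs_init_workers(&helper, "helper", 1, NULL);
	btrfs_start_workers(&helper, 1);

	/* thread start operations for 'pool' will be queued to 'helper' */
	btrfs_init_workers(&pool, "worker", 8, &helper);
	btrfs_start_workers(&pool, 1);
}
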
static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;
	start = container_of(work, struct worker_start, work);
	btrfs_start_workers(start->queue, 1);
	kfree(start);
}

static int start_new_worker(struct btrfs_workers *queue)
{
	struct worker_start *start;
	int ret;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return -ENOMEM;

	start->work.func = start_new_worker_func;
	start->queue = queue;
	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
	if (ret)
		kfree(start);
	return ret;
}

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

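/*
 * Editorial note: the two helpers above form a hysteresis band.  With the
 * default idle_thresh of 32, a worker leaves the idle list once it has 32
 * or more pending items, but does not return to it until its backlog drops
 * below 16 (idle_thresh / 2).  The gap keeps a worker from bouncing between
 * the two lists when its queue length hovers around a single threshold.
 */
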
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	start_new_worker(workers);
	return;

out:
	spin_unlock_irqrestore(&workers->lock, flags);
}

static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}

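/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * on a pool with ->ordered set, work->func may finish in any order across
 * workers, but ordered_func and ordered_free are called strictly in
 * submission order by the loop above.  The example_* names are hypothetical.
 */
static void example_csum_work(struct btrfs_work *work)
{
	/* heavy lifting; may finish in any order across worker threads */
}

static void example_csum_done(struct btrfs_work *work)
{
	/* runs in the order the items were queued */
}

static void example_csum_free(struct btrfs_work *work)
{
	kfree(work);
}

static int __maybe_unused example_queue_ordered(struct btrfs_workers *pool)
{
	struct btrfs_work *work;

	work = kzalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return -ENOMEM;
	work->func = example_csum_work;
	work->ordered_func = example_csum_done;
	work->ordered_free = example_csum_free;
	/* pool->ordered is assumed to have been set to 1 right after init */
	return btrfs_queue_worker(pool, work);
}
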
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * queue more work?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

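/*
 * Editorial note: the 120 second schedule_timeout above, together with
 * try_worker_shutdown, is how an idle pool shrinks.  A worker that sleeps
 * through the whole timeout without being marked working again frees
 * itself, and the num_workers > 1 check in try_worker_shutdown keeps the
 * last thread of a pool alive.
 */
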
/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers,
				 int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);

		atomic_set(&worker->num_pending, 0);
		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}
		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		workers->num_workers_starting--;
		WARN_ON(workers->num_workers_starting < 0);
		spin_unlock_irq(&workers->lock);
	}
	return 0;

fail:
	btrfs_stop_workers(workers);
	return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting += num_workers;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers, num_workers);
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

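/*
 * Editorial note: with the default idle_thresh of 32, the busy worker at
 * the head of the list absorbs 32 consecutive submissions before it is
 * rotated to the tail, so requests queued at roughly the same time tend to
 * land on the same thread rather than being sprayed round-robin across
 * the pool.
 */
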
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			__btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}

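/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a long running work function can make bounded progress and put itself
 * back on its worker's queue.  Embedding the btrfs_work in a private struct
 * and recovering it with container_of mirrors how worker_start is handled
 * above; the example_* names are hypothetical.
 */
struct example_job {
	struct btrfs_work work;
	int batches_left;
};

static void __maybe_unused example_long_running(struct btrfs_work *work)
{
	struct example_job *job = container_of(work, struct example_job, work);

	/* ... make progress on one batch here ... */

	if (--job->batches_left > 0) {
		btrfs_requeue_work(work);
		return;
	}
	kfree(job);
}
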
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
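
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * submitting a plain (unordered) work item.  Because the pool is unordered,
 * the work function owns the item once it runs and frees it itself, as the
 * comment in worker_loop notes.  The example_* names are hypothetical.
 */
static void example_func(struct btrfs_work *work)
{
	/* do the actual processing, then release what example_submit allocated */
	kfree(work);
}

static int __maybe_unused example_submit(struct btrfs_workers *pool)
{
	struct btrfs_work *work;

	work = kzalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return -ENOMEM;
	work->func = example_func;
	btrfs_set_work_high_prio(work);	/* optional: use the prio_pending list */
	return btrfs_queue_worker(pool, work);
}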