/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
#include <asm/types.h>
#include <asm/atomic.h>

#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

#include "kcopyd.h"
static struct workqueue_struct *_kcopyd_wq;
static struct work_struct _kcopyd_work;

static void wake(void)
{
        queue_work(_kcopyd_wq, &_kcopyd_work);
}
/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct kcopyd_client {
        struct list_head list;

        spinlock_t lock;
        struct page_list *pages;
        unsigned int nr_pages;
        unsigned int nr_free_pages;

        struct dm_io_client *io_client;

        wait_queue_head_t destroyq;
        atomic_t nr_jobs;
};
static struct page_list *alloc_pl(void)
{
        struct page_list *pl;

        pl = kmalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return NULL;

        pl->page = alloc_page(GFP_KERNEL);
        if (!pl->page) {
                kfree(pl);
                return NULL;
        }

        return pl;
}

static void free_pl(struct page_list *pl)
{
        __free_page(pl->page);
        kfree(pl);
}
static int kcopyd_get_pages(struct kcopyd_client *kc,
                            unsigned int nr, struct page_list **pages)
{
        struct page_list *pl;

        spin_lock(&kc->lock);
        if (kc->nr_free_pages < nr) {
                spin_unlock(&kc->lock);
                return -ENOMEM;
        }

        kc->nr_free_pages -= nr;
        for (*pages = pl = kc->pages; --nr; pl = pl->next)
                ;

        kc->pages = pl->next;
        pl->next = NULL;

        spin_unlock(&kc->lock);

        return 0;
}
static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
{
        struct page_list *cursor;

        spin_lock(&kc->lock);
        for (cursor = pl; cursor->next; cursor = cursor->next)
                kc->nr_free_pages++;

        kc->nr_free_pages++;
        cursor->next = kc->pages;
        kc->pages = pl;
        spin_unlock(&kc->lock);
}
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
        struct page_list *next;

        while (pl) {
                next = pl->next;
                free_pl(pl);
                pl = next;
        }
}
static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
{
        unsigned int i;
        struct page_list *pl = NULL, *next;

        for (i = 0; i < nr; i++) {
                next = alloc_pl();
                if (!next) {
                        if (pl)
                                drop_pages(pl);
                        return -ENOMEM;
                }
                next->next = pl;
                pl = next;
        }

        kcopyd_put_pages(kc, pl);
        kc->nr_pages += nr;
        return 0;
}
static void client_free_pages(struct kcopyd_client *kc)
{
        BUG_ON(kc->nr_free_pages != kc->nr_pages);
        drop_pages(kc->pages);
        kc->pages = NULL;
        kc->nr_free_pages = kc->nr_pages = 0;
}
/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
        struct kcopyd_client *kc;
        struct list_head list;
        unsigned long flags;

        /*
         * Error state of the job.
         */
        int read_err;
        unsigned int write_err;

        /*
         * Either READ or WRITE
         */
        int rw;
        struct io_region source;

        /*
         * The destinations for the transfer.
         */
        unsigned int num_dests;
        struct io_region dests[KCOPYD_MAX_REGIONS];

        sector_t offset;
        unsigned int nr_pages;
        struct page_list *pages;

        /*
         * Set this to ensure you are notified when the job has
         * completed.  'context' is for callback to use.
         */
        kcopyd_notify_fn fn;
        void *context;

        /*
         * These fields are only used if the job has been split
         * into more manageable parts.
         */
        struct mutex lock;
        atomic_t sub_jobs;
        sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static struct kmem_cache *_job_cache;
static mempool_t *_job_pool;
/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
static DEFINE_SPINLOCK(_job_lock);

static LIST_HEAD(_complete_jobs);
static LIST_HEAD(_io_jobs);
static LIST_HEAD(_pages_jobs);
static int jobs_init(void)
{
        _job_cache = KMEM_CACHE(kcopyd_job, 0);
        if (!_job_cache)
                return -ENOMEM;

        _job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
        if (!_job_pool) {
                kmem_cache_destroy(_job_cache);
                return -ENOMEM;
        }

        return 0;
}

static void jobs_exit(void)
{
        BUG_ON(!list_empty(&_complete_jobs));
        BUG_ON(!list_empty(&_io_jobs));
        BUG_ON(!list_empty(&_pages_jobs));

        mempool_destroy(_job_pool);
        kmem_cache_destroy(_job_cache);
        _job_pool = NULL;
        _job_cache = NULL;
}
/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop(struct list_head *jobs)
{
        struct kcopyd_job *job = NULL;
        unsigned long flags;

        spin_lock_irqsave(&_job_lock, flags);

        if (!list_empty(jobs)) {
                job = list_entry(jobs->next, struct kcopyd_job, list);
                list_del(&job->list);
        }
        spin_unlock_irqrestore(&_job_lock, flags);

        return job;
}
static void push(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;

        spin_lock_irqsave(&_job_lock, flags);
        list_add_tail(&job->list, jobs);
        spin_unlock_irqrestore(&_job_lock, flags);
}
/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
        void *context = job->context;
        int read_err = job->read_err;
        unsigned int write_err = job->write_err;
        kcopyd_notify_fn fn = job->fn;
        struct kcopyd_client *kc = job->kc;

        kcopyd_put_pages(kc, job->pages);
        mempool_free(job, _job_pool);
        fn(read_err, write_err, context);

        if (atomic_dec_and_test(&kc->nr_jobs))
                wake_up(&kc->destroyq);

        return 0;
}
static void complete_io(unsigned long error, void *context)
{
        struct kcopyd_job *job = (struct kcopyd_job *) context;

        if (error) {
                if (job->rw == WRITE)
                        job->write_err |= error;
                else
                        job->read_err = 1;

                if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
                        push(&_complete_jobs, job);
                        wake();
                        return;
                }
        }

        if (job->rw == WRITE)
                push(&_complete_jobs, job);

        else {
                job->rw = WRITE;
                push(&_io_jobs, job);
        }

        wake();
}
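/*
 * Note: a job passes through run_io_job() twice.  It is first issued as
 * a READ of job->source into job->pages; complete_io() then flips
 * job->rw to WRITE and requeues it, so the same pages are written out
 * to every destination region.
 */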
/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
        int r;
        struct dm_io_request io_req = {
                .bi_rw = job->rw,
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
                .notify.fn = complete_io,
                .notify.context = job,
                .client = job->kc->io_client,
        };

        if (job->rw == READ)
                r = dm_io(&io_req, 1, &job->source, NULL);
        else
                r = dm_io(&io_req, job->num_dests, job->dests, NULL);

        return r;
}
static int run_pages_job(struct kcopyd_job *job)
{
        int r;

        job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
                                  PAGE_SIZE >> 9);
        r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
        if (!r) {
                /* this job is ready for io */
                push(&_io_jobs, job);
                return 0;
        }

        if (r == -ENOMEM)
                /* can't complete now */
                return 1;

        return r;
}
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
{
        struct kcopyd_job *job;
        int r, count = 0;

        while ((job = pop(jobs))) {

                r = fn(job);

                if (r < 0) {
                        /* error this rogue job */
                        if (job->rw == WRITE)
                                job->write_err = (unsigned int) -1;
                        else
                                job->read_err = 1;
                        push(&_complete_jobs, job);
                        break;
                }

                if (r > 0) {
                        /*
                         * We couldn't service this job ATM, so
                         * push this job back onto the list.
                         */
                        push(jobs, job);
                        break;
                }

                count++;
        }

        return count;
}
/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *ignored)
{
        /*
         * The order that these are called is *very* important.
         * complete jobs can free some pages for pages jobs.
         * Pages jobs when successful will jump onto the io jobs
         * list.  io jobs call wake when they complete and it all
         * starts again.
         */
        process_jobs(&_complete_jobs, run_complete_job);
        process_jobs(&_pages_jobs, run_pages_job);
        process_jobs(&_io_jobs, run_io_job);
}
/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
        atomic_inc(&job->kc->nr_jobs);
        push(&_pages_jobs, job);
        wake();
}

#define SUB_JOB_SIZE 128
static void segment_complete(int read_err,
                             unsigned int write_err, void *context)
{
        /* FIXME: tidy this function */
        sector_t progress = 0;
        sector_t count = 0;
        struct kcopyd_job *job = (struct kcopyd_job *) context;

        mutex_lock(&job->lock);

        /* update the error */
        if (read_err)
                job->read_err = 1;

        if (write_err)
                job->write_err |= write_err;

        /*
         * Only dispatch more work if there hasn't been an error.
         */
        if ((!job->read_err && !job->write_err) ||
            test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
                /* get the next chunk of work */
                progress = job->progress;
                count = job->source.count - progress;
                if (count) {
                        if (count > SUB_JOB_SIZE)
                                count = SUB_JOB_SIZE;

                        job->progress += count;
                }
        }
        mutex_unlock(&job->lock);

        if (count) {
                int i;
                struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);

                *sub_job = *job;
                sub_job->source.sector += progress;
                sub_job->source.count = count;

                for (i = 0; i < job->num_dests; i++) {
                        sub_job->dests[i].sector += progress;
                        sub_job->dests[i].count = count;
                }

                sub_job->fn = segment_complete;
                sub_job->context = job;
                dispatch_job(sub_job);

        } else if (atomic_dec_and_test(&job->sub_jobs)) {

                /*
                 * To avoid a race we must keep the job around
                 * until after the notify function has completed.
                 * Otherwise the client may try and stop the job
                 * after we've completed.
                 */
                job->fn(read_err, write_err, job->context);
                mempool_free(job, _job_pool);
        }
}
/*
 * Create some little jobs that will do the move between
 * them.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
        int i;

        atomic_set(&job->sub_jobs, SPLIT_COUNT);
        for (i = 0; i < SPLIT_COUNT; i++)
                segment_complete(0, 0u, job);
}
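/*
 * Note: with SUB_JOB_SIZE of 128 sectors (64KiB) and SPLIT_COUNT of 8,
 * a large copy is driven as up to eight in-flight 64KiB sub-jobs; each
 * sub-job's completion re-enters segment_complete(), which claims the
 * next chunk via job->progress until job->source.count is exhausted.
 */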
int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
                unsigned int num_dests, struct io_region *dests,
                unsigned int flags, kcopyd_notify_fn fn, void *context)
{
        struct kcopyd_job *job;

        /*
         * Allocate a new job.
         */
        job = mempool_alloc(_job_pool, GFP_NOIO);

        /*
         * set up for the read.
         */
        job->kc = kc;
        job->flags = flags;
        job->read_err = 0;
        job->write_err = 0;
        job->rw = READ;

        job->source = *from;

        job->num_dests = num_dests;
        memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

        job->offset = 0;
        job->nr_pages = 0;
        job->pages = NULL;

        job->fn = fn;
        job->context = context;

        if (job->source.count < SUB_JOB_SIZE)
                dispatch_job(job);

        else {
                mutex_init(&job->lock);
                job->progress = 0;
                split_job(job);
        }

        return 0;
}
/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
        /* FIXME: finish */
        return -1;
}
#endif  /*  0  */

/*-----------------------------------------------------------------
 * Unit setup
 *---------------------------------------------------------------*/
static DEFINE_MUTEX(_client_lock);
static LIST_HEAD(_clients);
static void client_add(struct kcopyd_client *kc)
{
        mutex_lock(&_client_lock);
        list_add(&kc->list, &_clients);
        mutex_unlock(&_client_lock);
}

static void client_del(struct kcopyd_client *kc)
{
        mutex_lock(&_client_lock);
        list_del(&kc->list);
        mutex_unlock(&_client_lock);
}

static DEFINE_MUTEX(kcopyd_init_lock);
static int kcopyd_clients = 0;
static int kcopyd_init(void)
{
        int r;

        mutex_lock(&kcopyd_init_lock);

        if (kcopyd_clients) {
                /* Already initialized. */
                kcopyd_clients++;
                mutex_unlock(&kcopyd_init_lock);
                return 0;
        }

        r = jobs_init();
        if (r) {
                mutex_unlock(&kcopyd_init_lock);
                return r;
        }

        _kcopyd_wq = create_singlethread_workqueue("kcopyd");
        if (!_kcopyd_wq) {
                jobs_exit();
                mutex_unlock(&kcopyd_init_lock);
                return -ENOMEM;
        }

        kcopyd_clients++;
        INIT_WORK(&_kcopyd_work, do_work);
        mutex_unlock(&kcopyd_init_lock);
        return 0;
}
static void kcopyd_exit(void)
{
        mutex_lock(&kcopyd_init_lock);
        kcopyd_clients--;
        if (!kcopyd_clients) {
                jobs_exit();
                destroy_workqueue(_kcopyd_wq);
                _kcopyd_wq = NULL;
        }
        mutex_unlock(&kcopyd_init_lock);
}
int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
{
        int r = 0;
        struct kcopyd_client *kc;

        r = kcopyd_init();
        if (r)
                return r;

        kc = kmalloc(sizeof(*kc), GFP_KERNEL);
        if (!kc) {
                kcopyd_exit();
                return -ENOMEM;
        }

        spin_lock_init(&kc->lock);
        kc->pages = NULL;
        kc->nr_pages = kc->nr_free_pages = 0;
        r = client_alloc_pages(kc, nr_pages);
        if (r) {
                kfree(kc);
                kcopyd_exit();
                return r;
        }

        kc->io_client = dm_io_client_create(nr_pages);
        if (IS_ERR(kc->io_client)) {
                r = PTR_ERR(kc->io_client);
                client_free_pages(kc);
                kfree(kc);
                kcopyd_exit();
                return r;
        }

        init_waitqueue_head(&kc->destroyq);
        atomic_set(&kc->nr_jobs, 0);

        client_add(kc);
        *result = kc;
        return 0;
}
void kcopyd_client_destroy(struct kcopyd_client *kc)
{
        /* Wait for completion of all jobs submitted by this client. */
        wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

        dm_io_client_destroy(kc->io_client);
        client_free_pages(kc);
        client_del(kc);
        kfree(kc);
        kcopyd_exit();
}

EXPORT_SYMBOL(kcopyd_client_create);
EXPORT_SYMBOL(kcopyd_client_destroy);
EXPORT_SYMBOL(kcopyd_copy);
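/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * example of how a caller might drive the interface exported above,
 * kept under "#if 0".  The helper names and the page-count value are
 * assumptions for illustration only; <linux/completion.h> would also
 * be needed.
 */
#if 0
/* Completion callback matching kcopyd_notify_fn. */
static void example_copy_done(int read_err, unsigned int write_err,
                              void *context)
{
        struct completion *done = context;

        /* read_err/write_err are non-zero if the copy (partly) failed. */
        complete(done);
}

static int example_copy(struct io_region *from, struct io_region *to)
{
        struct kcopyd_client *kc;
        struct completion done;
        int r;

        /* Reserve 32 pages of copy buffer for this client (arbitrary). */
        r = kcopyd_client_create(32, &kc);
        if (r)
                return r;

        init_completion(&done);

        /* One destination, no flags (errors are not ignored). */
        r = kcopyd_copy(kc, from, 1, to, 0, example_copy_done, &done);
        if (!r)
                wait_for_completion(&done);

        kcopyd_client_destroy(kc);
        return r;
}
#endif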