/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
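/*
 * Illustrative use from a client (a minimal sketch, not part of this
 * file; my_notify_fn and my_context are hypothetical names):
 *
 *	struct dm_io_region from, to;
 *	struct dm_kcopyd_client *kc = dm_kcopyd_client_create();
 *
 *	if (IS_ERR(kc))
 *		return PTR_ERR(kc);
 *	... fill in from/to: .bdev, .sector, .count (512-byte sectors) ...
 *	dm_kcopyd_copy(kc, &from, 1, &to, 0, my_notify_fn, my_context);
 *	... my_notify_fn(read_err, write_err, my_context) runs later ...
 *	dm_kcopyd_client_destroy(kc);	// waits for outstanding jobs
 */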
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"
#define SUB_JOB_SIZE	128
#define SPLIT_COUNT	8
#define MIN_JOBS	8
#define RESERVE_PAGES	(DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
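/*
 * Worked out: SUB_JOB_SIZE is in 512-byte sectors, so one sub job moves
 * 128 << 9 = 64KiB; with 4KiB pages RESERVE_PAGES comes to 16, enough
 * for one sub job to make progress even under memory pressure.
 */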
/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	struct page_list *pages;
	unsigned nr_reserved_pages;
	unsigned nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};
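/*
 * A single-entry, self-referencing page list wrapping ZERO_PAGE(0);
 * jobs whose pages point here write zeroes instead of copied data
 * (see dm_kcopyd_zero() below).
 */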
static struct page_list zero_page_list;
static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
/*
 * Obtain one page for the use of kcopyd.
 */
static struct page_list *alloc_pl(gfp_t gfp)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), gfp);
	if (!pl)
		return NULL;

	pl->page = alloc_page(gfp);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}
static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}
/*
 * Add the provided pages to a client's free page list, releasing
 * back to the system any beyond the reserved_pages limit.
 */
static void kcopyd_put_pages(struct dm_kcopyd_client *kc,
			     struct page_list *pl)
{
	struct page_list *next;

	do {
		next = pl->next;

		if (kc->nr_free_pages >= kc->nr_reserved_pages)
			free_pl(pl);
		else {
			pl->next = kc->pages;
			kc->pages = pl;
			kc->nr_free_pages++;
		}

		pl = next;
	} while (pl);
}
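/*
 * Page allocation strategy: try a fresh, cheap allocation first
 * (__GFP_NOWARN | __GFP_NORETRY), then fall back to the client's
 * reserve; the reserve is sized (RESERVE_PAGES) so that one
 * SUB_JOB_SIZE transfer can always proceed.
 */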
static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
		if (unlikely(!pl)) {
			/* Use reserved pages */
			pl = kc->pages;
			if (unlikely(!pl))
				goto out_of_memory;
			kc->pages = pl->next;
			kc->nr_free_pages--;
		}
		pl->next = *pages;
		*pages = pl;
	} while (--nr);

	return 0;

out_of_memory:
	if (*pages)
		kcopyd_put_pages(kc, *pages);
	return -ENOMEM;
}
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}
/*
 * Allocate and reserve nr_pages for the use of a specific client.
 */
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
	unsigned i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL);
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kc->nr_reserved_pages += nr_pages;
	kcopyd_put_pages(kc, pl);

	return 0;
}
static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;

	struct kcopyd_job *master_job;
};
static struct kmem_cache *_job_cache;
int __init dm_kcopyd_init(void)
{
	_job_cache = kmem_cache_create("kcopyd_job",
				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
				__alignof__(struct kcopyd_job), 0, NULL);
	if (!_job_cache)
		return -ENOMEM;

	zero_page_list.next = &zero_page_list;
	zero_page_list.page = ZERO_PAGE(0);

	return 0;
}
void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}
/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}
static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}
static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}
/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages && job->pages != &zero_page_list)
		kcopyd_put_pages(kc, job->pages);
	/*
	 * If this is the master job, the sub jobs have already
	 * completed so we can free everything.
	 */
	if (job->master_job == job)
		mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}
static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	if (error) {
		if (job->rw & WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (job->rw & WRITE)
		push(&kc->complete_jobs, job);
	else {
		/* The read phase is done; switch to writing the dests. */
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}
/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}
static int run_pages_job(struct kcopyd_job *job)
{
	int r;
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw & WRITE)
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}
/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}
/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->complete_jobs, job);
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
	struct kcopyd_job *job = sub_job->master_job;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}
/*
 * Create some sub jobs to share the work between them.
 */
static void split_job(struct kcopyd_job *master_job)
{
	int i;

	atomic_inc(&master_job->kc->nr_jobs);

	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++) {
		master_job[i + 1].master_job = master_job;
		segment_complete(0, 0u, &master_job[i + 1]);
	}
}
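/*
 * Worked example: a 2048-sector (1MiB with 512-byte sectors) copy is
 * carved into 16 SUB_JOB_SIZE segments; SPLIT_COUNT (8) sub jobs run
 * at once, and each completion claims the next segment via
 * segment_complete() until job->progress reaches job->source.count.
 */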
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;
	int i;

	/*
	 * Allocate an array of jobs consisting of one master job
	 * followed by SPLIT_COUNT sub jobs.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	if (from) {
		job->source = *from;
		job->pages = NULL;
		job->rw = READ;
	} else {
		memset(&job->source, 0, sizeof job->source);
		job->source.count = job->dests[0].count;
		job->pages = &zero_page_list;

		/*
		 * Use WRITE SAME to optimize zeroing if all dests support it.
		 */
		job->rw = WRITE | REQ_WRITE_SAME;
		for (i = 0; i < job->num_dests; i++)
			if (!bdev_write_same(job->dests[i].bdev)) {
				job->rw = WRITE;
				break;
			}
	}

	job->fn = fn;
	job->context = context;
	job->master_job = job;

	if (job->source.count <= SUB_JOB_SIZE)
		dispatch_job(job);
	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);
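/*
 * dm_kcopyd_zero() below is dm_kcopyd_copy() with from == NULL, which
 * selects the zero_page_list path above: zeroes are written to every
 * destination, via WRITE SAME when all destination bdevs support it.
 */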
int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
		   unsigned num_dests, struct dm_io_region *dests,
		   unsigned flags, dm_kcopyd_notify_fn fn, void *context)
{
	return dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
EXPORT_SYMBOL(dm_kcopyd_zero);
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
				 dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	memset(job, 0, sizeof(struct kcopyd_job));
	job->kc = kc;
	job->fn = fn;
	job->context = context;
	job->master_job = job;

	atomic_inc(&kc->nr_jobs);

	return job;
}
EXPORT_SYMBOL(dm_kcopyd_prepare_callback);
void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
{
	struct kcopyd_job *job = j;
	struct dm_kcopyd_client *kc = job->kc;

	job->read_err = read_err;
	job->write_err = write_err;

	push(&kc->complete_jobs, job);
	wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);
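/*
 * Illustrative pairing of the two calls above (a sketch; do_my_io,
 * my_notify_fn and ctx are hypothetical): a target that performs the
 * work itself but wants the completion delivered from the single
 * kcopyd thread can do:
 *
 *	void *j = dm_kcopyd_prepare_callback(kc, my_notify_fn, ctx);
 *	do_my_io();
 *	dm_kcopyd_do_callback(j, 0, 0);
 */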
/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */
/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
struct dm_kcopyd_client *dm_kcopyd_client_create(void)
{
	int r = -ENOMEM;
	struct dm_kcopyd_client *kc;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!kc->job_pool)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd",
					WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!kc->kcopyd_wq)
		goto bad_workqueue;

	kc->pages = NULL;
	kc->nr_reserved_pages = kc->nr_free_pages = 0;
	r = client_reserve_pages(kc, RESERVE_PAGES);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create();
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	return kc;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_destroy(kc->job_pool);
bad_slab:
	kfree(kc);

	return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);