/* release/src-rt-6.x/linux/linux-2.6/drivers/md/kcopyd.c (tomato.git, RT-AC66 3.0.0.4.374.130) */
/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
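/*
 * Illustrative usage sketch (not part of the original source; the
 * caller-side names src_bdev, dst_bdev and ctx are hypothetical, and
 * error handling is omitted).  A client such as dm-mirror creates a
 * kcopyd client, submits copies and is told about completion through
 * the notify callback.  Region counts are in 512-byte sectors.
 *
 *	static void copy_done(int read_err, unsigned int write_err, void *ctx)
 *	{
 *		// inspect read_err/write_err, then e.g. complete(ctx)
 *	}
 *
 *	struct kcopyd_client *kc;
 *	struct io_region from = { .bdev = src_bdev, .sector = 0, .count = 1024 };
 *	struct io_region to   = { .bdev = dst_bdev, .sector = 0, .count = 1024 };
 *
 *	kcopyd_client_create(32, &kc);			// pool of 32 pages
 *	kcopyd_copy(kc, &from, 1, &to, 0, copy_done, ctx);
 *	...
 *	kcopyd_client_destroy(kc);			// waits for outstanding jobs
 */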
#include <asm/types.h>
#include <asm/atomic.h>

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

#include "kcopyd.h"
static struct workqueue_struct *_kcopyd_wq;
static struct work_struct _kcopyd_work;
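/*
 * All kcopyd clients share this single workqueue and work item: wake()
 * queues do_work(), which drains the three global job lists defined below.
 */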
static inline void wake(void)
{
	queue_work(_kcopyd_wq, &_kcopyd_work);
}
/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct kcopyd_client {
	struct list_head list;

	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;
};
static struct page_list *alloc_pl(void)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return NULL;

	pl->page = alloc_page(GFP_KERNEL);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}
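/*
 * Take @nr pages off the front of the client's free list.  Returns -ENOMEM
 * if fewer than @nr pages are currently free; run_pages_job() treats that
 * as "retry later" rather than as a hard failure.
 */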
static int kcopyd_get_pages(struct kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;	/* walk to the last of the nr pages being handed out */

	kc->pages = pl->next;
	pl->next = NULL;

	spin_unlock(&kc->lock);

	return 0;
}
static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}
static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;
	return 0;
}
static void client_free_pages(struct kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}
/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned int write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct io_region dests[KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct semaphore lock;
	atomic_t sub_jobs;
	sector_t progress;
};
/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static struct kmem_cache *_job_cache;
static mempool_t *_job_pool;
/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
static DEFINE_SPINLOCK(_job_lock);

static LIST_HEAD(_complete_jobs);
static LIST_HEAD(_io_jobs);
static LIST_HEAD(_pages_jobs);
static int jobs_init(void)
{
	_job_cache = kmem_cache_create("kcopyd-jobs",
				       sizeof(struct kcopyd_job),
				       __alignof__(struct kcopyd_job),
				       0, NULL, NULL);
	if (!_job_cache)
		return -ENOMEM;

	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!_job_pool) {
		kmem_cache_destroy(_job_cache);
		return -ENOMEM;
	}

	return 0;
}
static void jobs_exit(void)
{
	BUG_ON(!list_empty(&_complete_jobs));
	BUG_ON(!list_empty(&_io_jobs));
	BUG_ON(!list_empty(&_pages_jobs));

	mempool_destroy(_job_pool);
	kmem_cache_destroy(_job_cache);
	_job_pool = NULL;
	_job_cache = NULL;
}
/*
 * Functions to push a job onto the tail, and pop a job off the head,
 * of a given job list.
 */
static inline struct kcopyd_job *pop(struct list_head *jobs)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&_job_lock, flags);

	return job;
}
static inline void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&_job_lock, flags);
}
/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned int write_err = job->write_err;
	kcopyd_notify_fn fn = job->fn;
	struct kcopyd_client *kc = job->kc;

	kcopyd_put_pages(kc, job->pages);
	mempool_free(job, _job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}
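/*
 * dm-io completion callback.  A failed job (unless KCOPYD_IGNORE_ERROR is
 * set) goes straight to the completion list.  Otherwise a finished READ is
 * converted into the WRITE phase and requeued on the io list, and a
 * finished WRITE moves to the completion list.
 */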
static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&_complete_jobs, job);
			wake();
			return;
		}
	}

	if (job->rw == WRITE)
		push(&_complete_jobs, job);

	else {
		job->rw = WRITE;
		push(&_io_jobs, job);
	}

	wake();
}
/*
 * Issue the io for a job: a single read from the source region, or a
 * write to each of the destination regions.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}
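/*
 * Reserve enough pages from the client pool to cover the region being
 * copied (PAGE_SIZE >> 9 sectors fit in one page).  If the pool is
 * currently exhausted the job stays on the pages list and is retried.
 */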
static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&_io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned int) -1;
			else
				job->read_err = 1;
			push(&_complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push(jobs, job);
			break;
		}

		count++;
	}

	return count;
}
/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *ignored)
{
	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	process_jobs(&_complete_jobs, run_complete_job);
	process_jobs(&_pages_jobs, run_pages_job);
	process_jobs(&_io_jobs, run_io_job);
}
/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	atomic_inc(&job->kc->nr_jobs);
	push(&_pages_jobs, job);
	wake();
}

#define SUB_JOB_SIZE 128
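/*
 * Large copies are driven by SPLIT_COUNT sub-jobs running in parallel.
 * Each sub-job completion re-enters segment_complete(), which claims the
 * next SUB_JOB_SIZE-sector chunk of the parent job (under job->lock) and
 * dispatches a new sub-job for it.  When the last sub-job finishes and no
 * work remains, the parent's notify function is called and the parent job
 * is freed.
 */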
static void segment_complete(int read_err,
			     unsigned int write_err, void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	down(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	up(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * To avoid a race we must keep the job around
		 * until after the notify function has completed.
		 * Otherwise the client may try and stop the job
		 * after we've completed.
		 */
		job->fn(read_err, write_err, job->context);
		mempool_free(job, _job_pool);
	}
}
/*
 * Create some little jobs that will do the move between
 * them.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}
int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
		unsigned int num_dests, struct io_region *dests,
		unsigned int flags, kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(_job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);

	else {
		init_MUTEX(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif /* 0 */
/*-----------------------------------------------------------------
 * Unit setup
 *---------------------------------------------------------------*/
static DEFINE_MUTEX(_client_lock);
static LIST_HEAD(_clients);

static void client_add(struct kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_add(&kc->list, &_clients);
	mutex_unlock(&_client_lock);
}

static void client_del(struct kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_del(&kc->list);
	mutex_unlock(&_client_lock);
}
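/*
 * kcopyd_clients counts live clients.  The shared job mempool and the
 * kcopyd workqueue are created when the first client appears and torn
 * down again when the last one goes away.
 */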
static DEFINE_MUTEX(kcopyd_init_lock);
static int kcopyd_clients = 0;

static int kcopyd_init(void)
{
	int r;

	mutex_lock(&kcopyd_init_lock);

	if (kcopyd_clients) {
		/* Already initialized. */
		kcopyd_clients++;
		mutex_unlock(&kcopyd_init_lock);
		return 0;
	}

	r = jobs_init();
	if (r) {
		mutex_unlock(&kcopyd_init_lock);
		return r;
	}

	_kcopyd_wq = create_singlethread_workqueue("kcopyd");
	if (!_kcopyd_wq) {
		jobs_exit();
		mutex_unlock(&kcopyd_init_lock);
		return -ENOMEM;
	}

	kcopyd_clients++;
	INIT_WORK(&_kcopyd_work, do_work);
	mutex_unlock(&kcopyd_init_lock);
	return 0;
}
static void kcopyd_exit(void)
{
	mutex_lock(&kcopyd_init_lock);
	kcopyd_clients--;
	if (!kcopyd_clients) {
		jobs_exit();
		destroy_workqueue(_kcopyd_wq);
		_kcopyd_wq = NULL;
	}
	mutex_unlock(&kcopyd_init_lock);
}
int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
{
	int r = 0;
	struct kcopyd_client *kc;

	r = kcopyd_init();
	if (r)
		return r;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc) {
		kcopyd_exit();
		return -ENOMEM;
	}

	spin_lock_init(&kc->lock);
	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r) {
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	kc->io_client = dm_io_client_create(nr_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		client_free_pages(kc);
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	client_add(kc);
	*result = kc;
	return 0;
}
void kcopyd_client_destroy(struct kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	client_del(kc);
	kfree(kc);
	kcopyd_exit();
}
EXPORT_SYMBOL(kcopyd_client_create);
EXPORT_SYMBOL(kcopyd_client_destroy);
EXPORT_SYMBOL(kcopyd_copy);