loop: remove the incorrect write_begin/write_end shortcut
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / block / loop.c
blob46cdd69455575f3fb74e0efe3e91dfb1626d8422
/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 *
 */
52 #include <linux/module.h>
53 #include <linux/moduleparam.h>
54 #include <linux/sched.h>
55 #include <linux/fs.h>
56 #include <linux/file.h>
57 #include <linux/stat.h>
58 #include <linux/errno.h>
59 #include <linux/major.h>
60 #include <linux/wait.h>
61 #include <linux/blkdev.h>
62 #include <linux/blkpg.h>
63 #include <linux/init.h>
64 #include <linux/swap.h>
65 #include <linux/slab.h>
66 #include <linux/loop.h>
67 #include <linux/compat.h>
68 #include <linux/suspend.h>
69 #include <linux/freezer.h>
70 #include <linux/mutex.h>
71 #include <linux/writeback.h>
72 #include <linux/buffer_head.h> /* for invalidate_bdev() */
73 #include <linux/completion.h>
74 #include <linux/highmem.h>
75 #include <linux/kthread.h>
76 #include <linux/splice.h>
77 #include <linux/sysfs.h>
78 #include <linux/miscdevice.h>
79 #include <asm/uaccess.h>
/* Table of allocated loop minors and the mutex serializing access to it. */
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);

/* Module parameters controlling partition support on loop devices. */
static int max_part;
static int part_shift;
88 * Transfer functions
90 static int transfer_none(struct loop_device *lo, int cmd,
91 struct page *raw_page, unsigned raw_off,
92 struct page *loop_page, unsigned loop_off,
93 int size, sector_t real_block)
95 char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
96 char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
98 if (cmd == READ)
99 memcpy(loop_buf, raw_buf, size);
100 else
101 memcpy(raw_buf, loop_buf, size);
103 kunmap_atomic(loop_buf, KM_USER1);
104 kunmap_atomic(raw_buf, KM_USER0);
105 cond_resched();
106 return 0;
109 static int transfer_xor(struct loop_device *lo, int cmd,
110 struct page *raw_page, unsigned raw_off,
111 struct page *loop_page, unsigned loop_off,
112 int size, sector_t real_block)
114 char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
115 char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
116 char *in, *out, *key;
117 int i, keysize;
119 if (cmd == READ) {
120 in = raw_buf;
121 out = loop_buf;
122 } else {
123 in = loop_buf;
124 out = raw_buf;
127 key = lo->lo_encrypt_key;
128 keysize = lo->lo_encrypt_key_size;
129 for (i = 0; i < size; i++)
130 *out++ = *in++ ^ key[(i & 511) % keysize];
132 kunmap_atomic(loop_buf, KM_USER1);
133 kunmap_atomic(raw_buf, KM_USER0);
134 cond_resched();
135 return 0;
138 static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
140 if (unlikely(info->lo_encrypt_key_size <= 0))
141 return -EINVAL;
142 return 0;
145 static struct loop_func_table none_funcs = {
146 .number = LO_CRYPT_NONE,
147 .transfer = transfer_none,
150 static struct loop_func_table xor_funcs = {
151 .number = LO_CRYPT_XOR,
152 .transfer = transfer_xor,
153 .init = xor_init
156 /* xfer_funcs[0] is special - its release function is never called */
157 static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
158 &none_funcs,
159 &xor_funcs
162 static loff_t get_loop_size(struct loop_device *lo, struct file *file)
164 loff_t size, offset, loopsize;
166 /* Compute loopsize in bytes */
167 size = i_size_read(file->f_mapping->host);
168 offset = lo->lo_offset;
169 loopsize = size - offset;
170 if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
171 loopsize = lo->lo_sizelimit;
174 * Unfortunately, if we want to do I/O on the device,
175 * the number of 512-byte sectors has to fit into a sector_t.
177 return loopsize >> 9;
180 static int
181 figure_loop_size(struct loop_device *lo)
183 loff_t size = get_loop_size(lo, lo->lo_backing_file);
184 sector_t x = (sector_t)size;
186 if (unlikely((loff_t)x != size))
187 return -EFBIG;
189 set_capacity(lo->lo_disk, x);
190 return 0;
193 static inline int
194 lo_do_transfer(struct loop_device *lo, int cmd,
195 struct page *rpage, unsigned roffs,
196 struct page *lpage, unsigned loffs,
197 int size, sector_t rblock)
199 if (unlikely(!lo->transfer))
200 return 0;
202 return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
206 * __do_lo_send_write - helper for writing data to a loop device
208 * This helper just factors out common code between do_lo_send_direct_write()
209 * and do_lo_send_write().
211 static int __do_lo_send_write(struct file *file,
212 u8 *buf, const int len, loff_t pos)
214 ssize_t bw;
215 mm_segment_t old_fs = get_fs();
217 set_fs(get_ds());
218 bw = file->f_op->write(file, buf, len, &pos);
219 set_fs(old_fs);
220 if (likely(bw == len))
221 return 0;
222 printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
223 (unsigned long long)pos, len);
224 if (bw >= 0)
225 bw = -EIO;
226 return bw;
230 * do_lo_send_direct_write - helper for writing data to a loop device
232 * This is the fast, non-transforming version that does not need double
233 * buffering.
235 static int do_lo_send_direct_write(struct loop_device *lo,
236 struct bio_vec *bvec, loff_t pos, struct page *page)
238 ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
239 kmap(bvec->bv_page) + bvec->bv_offset,
240 bvec->bv_len, pos);
241 kunmap(bvec->bv_page);
242 cond_resched();
243 return bw;
247 * do_lo_send_write - helper for writing data to a loop device
249 * This is the slow, transforming version that needs to double buffer the
250 * data as it cannot do the transformations in place without having direct
251 * access to the destination pages of the backing file.
253 static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
254 loff_t pos, struct page *page)
256 int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
257 bvec->bv_offset, bvec->bv_len, pos >> 9);
258 if (likely(!ret))
259 return __do_lo_send_write(lo->lo_backing_file,
260 page_address(page), bvec->bv_len,
261 pos);
262 printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
263 "length %i.\n", (unsigned long long)pos, bvec->bv_len);
264 if (ret > 0)
265 ret = -EIO;
266 return ret;
269 static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
271 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
272 struct page *page);
273 struct bio_vec *bvec;
274 struct page *page = NULL;
275 int i, ret = 0;
277 if (lo->transfer != transfer_none) {
278 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
279 if (unlikely(!page))
280 goto fail;
281 kmap(page);
282 do_lo_send = do_lo_send_write;
283 } else {
284 do_lo_send = do_lo_send_direct_write;
287 bio_for_each_segment(bvec, bio, i) {
288 ret = do_lo_send(lo, bvec, pos, page);
289 if (ret < 0)
290 break;
291 pos += bvec->bv_len;
293 if (page) {
294 kunmap(page);
295 __free_page(page);
297 out:
298 return ret;
299 fail:
300 printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
301 ret = -ENOMEM;
302 goto out;
/* Cookie threaded through splice into lo_splice_actor() during reads. */
struct lo_read_data {
	struct loop_device *lo;	/* device being read */
	struct page *page;	/* destination bio page */
	unsigned offset;	/* current write offset within @page */
	int bsize;		/* per-step size cap */
};
312 static int
313 lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
314 struct splice_desc *sd)
316 struct lo_read_data *p = sd->u.data;
317 struct loop_device *lo = p->lo;
318 struct page *page = buf->page;
319 sector_t IV;
320 int size;
322 IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
323 (buf->offset >> 9);
324 size = sd->len;
325 if (size > p->bsize)
326 size = p->bsize;
328 if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
329 printk(KERN_ERR "loop: transfer error block %ld\n",
330 page->index);
331 size = -EINVAL;
334 flush_dcache_page(p->page);
336 if (size > 0)
337 p->offset += size;
339 return size;
342 static int
343 lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
345 return __splice_from_pipe(pipe, sd, lo_splice_actor);
348 static int
349 do_lo_receive(struct loop_device *lo,
350 struct bio_vec *bvec, int bsize, loff_t pos)
352 struct lo_read_data cookie;
353 struct splice_desc sd;
354 struct file *file;
355 long retval;
357 cookie.lo = lo;
358 cookie.page = bvec->bv_page;
359 cookie.offset = bvec->bv_offset;
360 cookie.bsize = bsize;
362 sd.len = 0;
363 sd.total_len = bvec->bv_len;
364 sd.flags = 0;
365 sd.pos = pos;
366 sd.u.data = &cookie;
368 file = lo->lo_backing_file;
369 retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
371 if (retval < 0)
372 return retval;
374 return 0;
377 static int
378 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
380 struct bio_vec *bvec;
381 int i, ret = 0;
383 bio_for_each_segment(bvec, bio, i) {
384 ret = do_lo_receive(lo, bvec, bsize, pos);
385 if (ret < 0)
386 break;
387 pos += bvec->bv_len;
389 return ret;
392 static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
394 loff_t pos;
395 int ret;
397 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
399 if (bio_rw(bio) == WRITE) {
400 struct file *file = lo->lo_backing_file;
402 if (bio->bi_rw & REQ_FLUSH) {
403 ret = vfs_fsync(file, 0);
404 if (unlikely(ret && ret != -EINVAL)) {
405 ret = -EIO;
406 goto out;
410 ret = lo_send(lo, bio, pos);
412 if ((bio->bi_rw & REQ_FUA) && !ret) {
413 ret = vfs_fsync(file, 0);
414 if (unlikely(ret && ret != -EINVAL))
415 ret = -EIO;
417 } else
418 ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
420 out:
421 return ret;
425 * Add bio to back of pending list
427 static void loop_add_bio(struct loop_device *lo, struct bio *bio)
429 bio_list_add(&lo->lo_bio_list, bio);
433 * Grab first pending buffer
435 static struct bio *loop_get_bio(struct loop_device *lo)
437 return bio_list_pop(&lo->lo_bio_list);
440 static int loop_make_request(struct request_queue *q, struct bio *old_bio)
442 struct loop_device *lo = q->queuedata;
443 int rw = bio_rw(old_bio);
445 if (rw == READA)
446 rw = READ;
448 BUG_ON(!lo || (rw != READ && rw != WRITE));
450 spin_lock_irq(&lo->lo_lock);
451 if (lo->lo_state != Lo_bound)
452 goto out;
453 if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
454 goto out;
455 loop_add_bio(lo, old_bio);
456 wake_up(&lo->lo_event);
457 spin_unlock_irq(&lo->lo_lock);
458 return 0;
460 out:
461 spin_unlock_irq(&lo->lo_lock);
462 bio_io_error(old_bio);
463 return 0;
466 struct switch_request {
467 struct file *file;
468 struct completion wait;
471 static void do_loop_switch(struct loop_device *, struct switch_request *);
473 static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
475 if (unlikely(!bio->bi_bdev)) {
476 do_loop_switch(lo, bio->bi_private);
477 bio_put(bio);
478 } else {
479 int ret = do_bio_filebacked(lo, bio);
480 bio_endio(bio, ret);
485 * worker thread that handles reads/writes to file backed loop devices,
486 * to avoid blocking in our make_request_fn. it also does loop decrypting
487 * on reads for block backed loop, as that is too heavy to do from
488 * b_end_io context where irqs may be disabled.
490 * Loop explanation: loop_clr_fd() sets lo_state to Lo_rundown before
491 * calling kthread_stop(). Therefore once kthread_should_stop() is
492 * true, make_request will not place any more requests. Therefore
493 * once kthread_should_stop() is true and lo_bio is NULL, we are
494 * done with the loop.
496 static int loop_thread(void *data)
498 struct loop_device *lo = data;
499 struct bio *bio;
501 set_user_nice(current, -20);
503 while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
505 wait_event_interruptible(lo->lo_event,
506 !bio_list_empty(&lo->lo_bio_list) ||
507 kthread_should_stop());
509 if (bio_list_empty(&lo->lo_bio_list))
510 continue;
511 spin_lock_irq(&lo->lo_lock);
512 bio = loop_get_bio(lo);
513 spin_unlock_irq(&lo->lo_lock);
515 BUG_ON(!bio);
516 loop_handle_bio(lo, bio);
519 return 0;
523 * loop_switch performs the hard work of switching a backing store.
524 * First it needs to flush existing IO, it does this by sending a magic
525 * BIO down the pipe. The completion of this BIO does the actual switch.
527 static int loop_switch(struct loop_device *lo, struct file *file)
529 struct switch_request w;
530 struct bio *bio = bio_alloc(GFP_KERNEL, 0);
531 if (!bio)
532 return -ENOMEM;
533 init_completion(&w.wait);
534 w.file = file;
535 bio->bi_private = &w;
536 bio->bi_bdev = NULL;
537 loop_make_request(lo->lo_queue, bio);
538 wait_for_completion(&w.wait);
539 return 0;
543 * Helper to flush the IOs in loop, but keeping loop thread running
545 static int loop_flush(struct loop_device *lo)
547 /* loop not yet configured, no running thread, nothing to flush */
548 if (!lo->lo_thread)
549 return 0;
551 return loop_switch(lo, NULL);
555 * Do the actual switch; called from the BIO completion routine
557 static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
559 struct file *file = p->file;
560 struct file *old_file = lo->lo_backing_file;
561 struct address_space *mapping;
563 /* if no new file, only flush of queued bios requested */
564 if (!file)
565 goto out;
567 mapping = file->f_mapping;
568 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
569 lo->lo_backing_file = file;
570 lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
571 mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
572 lo->old_gfp_mask = mapping_gfp_mask(mapping);
573 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
574 out:
575 complete(&p->wait);
580 * loop_change_fd switched the backing store of a loopback device to
581 * a new file. This is useful for operating system installers to free up
582 * the original file and in High Availability environments to switch to
583 * an alternative location for the content in case of server meltdown.
584 * This can only work if the loop device is used read-only, and if the
585 * new backing store is the same size and type as the old backing store.
587 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
588 unsigned int arg)
590 struct file *file, *old_file;
591 struct inode *inode;
592 int error;
594 error = -ENXIO;
595 if (lo->lo_state != Lo_bound)
596 goto out;
598 /* the loop device has to be read-only */
599 error = -EINVAL;
600 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
601 goto out;
603 error = -EBADF;
604 file = fget(arg);
605 if (!file)
606 goto out;
608 inode = file->f_mapping->host;
609 old_file = lo->lo_backing_file;
611 error = -EINVAL;
613 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
614 goto out_putf;
616 /* size of the new backing store needs to be the same */
617 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
618 goto out_putf;
620 /* and ... switch */
621 error = loop_switch(lo, file);
622 if (error)
623 goto out_putf;
625 fput(old_file);
626 if (max_part > 0)
627 ioctl_by_bdev(bdev, BLKRRPART, 0);
628 return 0;
630 out_putf:
631 fput(file);
632 out:
633 return error;
636 static inline int is_loop_device(struct file *file)
638 struct inode *i = file->f_mapping->host;
640 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
643 /* loop sysfs attributes */
645 static ssize_t loop_attr_show(struct device *dev, char *page,
646 ssize_t (*callback)(struct loop_device *, char *))
648 struct gendisk *disk = dev_to_disk(dev);
649 struct loop_device *lo = disk->private_data;
651 return callback(lo, page);
/* Declare a read-only sysfs attribute wired to loop_attr_<name>_show(). */
#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
664 static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
666 ssize_t ret;
667 char *p = NULL;
669 spin_lock_irq(&lo->lo_lock);
670 if (lo->lo_backing_file)
671 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
672 spin_unlock_irq(&lo->lo_lock);
674 if (IS_ERR_OR_NULL(p))
675 ret = PTR_ERR(p);
676 else {
677 ret = strlen(p);
678 memmove(buf, p, ret);
679 buf[ret++] = '\n';
680 buf[ret] = 0;
683 return ret;
686 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
688 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
691 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
693 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
696 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
698 int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
700 return sprintf(buf, "%s\n", autoclear ? "1" : "0");
703 LOOP_ATTR_RO(backing_file);
704 LOOP_ATTR_RO(offset);
705 LOOP_ATTR_RO(sizelimit);
706 LOOP_ATTR_RO(autoclear);
708 static struct attribute *loop_attrs[] = {
709 &loop_attr_backing_file.attr,
710 &loop_attr_offset.attr,
711 &loop_attr_sizelimit.attr,
712 &loop_attr_autoclear.attr,
713 NULL,
716 static struct attribute_group loop_attribute_group = {
717 .name = "loop",
718 .attrs= loop_attrs,
721 static int loop_sysfs_init(struct loop_device *lo)
723 return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
724 &loop_attribute_group);
727 static void loop_sysfs_exit(struct loop_device *lo)
729 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
730 &loop_attribute_group);
733 static int loop_set_fd(struct loop_device *lo, fmode_t mode,
734 struct block_device *bdev, unsigned int arg)
736 struct file *file, *f;
737 struct inode *inode;
738 struct address_space *mapping;
739 unsigned lo_blocksize;
740 int lo_flags = 0;
741 int error;
742 loff_t size;
744 /* This is safe, since we have a reference from open(). */
745 __module_get(THIS_MODULE);
747 error = -EBADF;
748 file = fget(arg);
749 if (!file)
750 goto out;
752 error = -EBUSY;
753 if (lo->lo_state != Lo_unbound)
754 goto out_putf;
756 /* Avoid recursion */
757 f = file;
758 while (is_loop_device(f)) {
759 struct loop_device *l;
761 if (f->f_mapping->host->i_bdev == bdev)
762 goto out_putf;
764 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
765 if (l->lo_state == Lo_unbound) {
766 error = -EINVAL;
767 goto out_putf;
769 f = l->lo_backing_file;
772 mapping = file->f_mapping;
773 inode = mapping->host;
775 error = -EINVAL;
776 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
777 goto out_putf;
779 if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
780 !file->f_op->write)
781 lo_flags |= LO_FLAGS_READ_ONLY;
783 lo_blocksize = S_ISBLK(inode->i_mode) ?
784 inode->i_bdev->bd_block_size : PAGE_SIZE;
786 error = -EFBIG;
787 size = get_loop_size(lo, file);
788 if ((loff_t)(sector_t)size != size)
789 goto out_putf;
791 error = 0;
793 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
795 lo->lo_blocksize = lo_blocksize;
796 lo->lo_device = bdev;
797 lo->lo_flags = lo_flags;
798 lo->lo_backing_file = file;
799 lo->transfer = transfer_none;
800 lo->ioctl = NULL;
801 lo->lo_sizelimit = 0;
802 lo->old_gfp_mask = mapping_gfp_mask(mapping);
803 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
805 bio_list_init(&lo->lo_bio_list);
808 * set queue make_request_fn, and add limits based on lower level
809 * device
811 blk_queue_make_request(lo->lo_queue, loop_make_request);
812 lo->lo_queue->queuedata = lo;
814 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
815 blk_queue_flush(lo->lo_queue, REQ_FLUSH);
817 set_capacity(lo->lo_disk, size);
818 bd_set_size(bdev, size << 9);
819 loop_sysfs_init(lo);
820 /* let user-space know about the new size */
821 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
823 set_blocksize(bdev, lo_blocksize);
825 lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
826 lo->lo_number);
827 if (IS_ERR(lo->lo_thread)) {
828 error = PTR_ERR(lo->lo_thread);
829 goto out_clr;
831 lo->lo_state = Lo_bound;
832 wake_up_process(lo->lo_thread);
833 if (max_part > 0)
834 ioctl_by_bdev(bdev, BLKRRPART, 0);
835 return 0;
837 out_clr:
838 loop_sysfs_exit(lo);
839 lo->lo_thread = NULL;
840 lo->lo_device = NULL;
841 lo->lo_backing_file = NULL;
842 lo->lo_flags = 0;
843 set_capacity(lo->lo_disk, 0);
844 invalidate_bdev(bdev);
845 bd_set_size(bdev, 0);
846 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
847 mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
848 lo->lo_state = Lo_unbound;
849 out_putf:
850 fput(file);
851 out:
852 /* This is safe: open() is still holding a reference. */
853 module_put(THIS_MODULE);
854 return error;
857 static int
858 loop_release_xfer(struct loop_device *lo)
860 int err = 0;
861 struct loop_func_table *xfer = lo->lo_encryption;
863 if (xfer) {
864 if (xfer->release)
865 err = xfer->release(lo);
866 lo->transfer = NULL;
867 lo->lo_encryption = NULL;
868 module_put(xfer->owner);
870 return err;
873 static int
874 loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
875 const struct loop_info64 *i)
877 int err = 0;
879 if (xfer) {
880 struct module *owner = xfer->owner;
882 if (!try_module_get(owner))
883 return -EINVAL;
884 if (xfer->init)
885 err = xfer->init(lo, i);
886 if (err)
887 module_put(owner);
888 else
889 lo->lo_encryption = xfer;
891 return err;
894 static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
896 struct file *filp = lo->lo_backing_file;
897 gfp_t gfp = lo->old_gfp_mask;
899 if (lo->lo_state != Lo_bound)
900 return -ENXIO;
902 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
903 return -EBUSY;
905 if (filp == NULL)
906 return -EINVAL;
908 spin_lock_irq(&lo->lo_lock);
909 lo->lo_state = Lo_rundown;
910 spin_unlock_irq(&lo->lo_lock);
912 kthread_stop(lo->lo_thread);
914 spin_lock_irq(&lo->lo_lock);
915 lo->lo_backing_file = NULL;
916 spin_unlock_irq(&lo->lo_lock);
918 loop_release_xfer(lo);
919 lo->transfer = NULL;
920 lo->ioctl = NULL;
921 lo->lo_device = NULL;
922 lo->lo_encryption = NULL;
923 lo->lo_offset = 0;
924 lo->lo_sizelimit = 0;
925 lo->lo_encrypt_key_size = 0;
926 lo->lo_flags = 0;
927 lo->lo_thread = NULL;
928 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
929 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
930 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
931 if (bdev)
932 invalidate_bdev(bdev);
933 set_capacity(lo->lo_disk, 0);
934 loop_sysfs_exit(lo);
935 if (bdev) {
936 bd_set_size(bdev, 0);
937 /* let user-space know about this change */
938 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
940 mapping_set_gfp_mask(filp->f_mapping, gfp);
941 lo->lo_state = Lo_unbound;
942 /* This is safe: open() is still holding a reference. */
943 module_put(THIS_MODULE);
944 if (max_part > 0 && bdev)
945 ioctl_by_bdev(bdev, BLKRRPART, 0);
946 mutex_unlock(&lo->lo_ctl_mutex);
948 * Need not hold lo_ctl_mutex to fput backing file.
949 * Calling fput holding lo_ctl_mutex triggers a circular
950 * lock dependency possibility warning as fput can take
951 * bd_mutex which is usually taken before lo_ctl_mutex.
953 fput(filp);
954 return 0;
957 static int
958 loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
960 int err;
961 struct loop_func_table *xfer;
962 uid_t uid = current_uid();
964 if (lo->lo_encrypt_key_size &&
965 lo->lo_key_owner != uid &&
966 !capable(CAP_SYS_ADMIN))
967 return -EPERM;
968 if (lo->lo_state != Lo_bound)
969 return -ENXIO;
970 if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
971 return -EINVAL;
973 err = loop_release_xfer(lo);
974 if (err)
975 return err;
977 if (info->lo_encrypt_type) {
978 unsigned int type = info->lo_encrypt_type;
980 if (type >= MAX_LO_CRYPT)
981 return -EINVAL;
982 xfer = xfer_funcs[type];
983 if (xfer == NULL)
984 return -EINVAL;
985 } else
986 xfer = NULL;
988 err = loop_init_xfer(lo, xfer, info);
989 if (err)
990 return err;
992 if (lo->lo_offset != info->lo_offset ||
993 lo->lo_sizelimit != info->lo_sizelimit) {
994 lo->lo_offset = info->lo_offset;
995 lo->lo_sizelimit = info->lo_sizelimit;
996 if (figure_loop_size(lo))
997 return -EFBIG;
1000 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
1001 memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
1002 lo->lo_file_name[LO_NAME_SIZE-1] = 0;
1003 lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
1005 if (!xfer)
1006 xfer = &none_funcs;
1007 lo->transfer = xfer->transfer;
1008 lo->ioctl = xfer->ioctl;
1010 if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
1011 (info->lo_flags & LO_FLAGS_AUTOCLEAR))
1012 lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
1014 lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
1015 lo->lo_init[0] = info->lo_init[0];
1016 lo->lo_init[1] = info->lo_init[1];
1017 if (info->lo_encrypt_key_size) {
1018 memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
1019 info->lo_encrypt_key_size);
1020 lo->lo_key_owner = uid;
1023 return 0;
1026 static int
1027 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1029 struct file *file = lo->lo_backing_file;
1030 struct kstat stat;
1031 int error;
1033 if (lo->lo_state != Lo_bound)
1034 return -ENXIO;
1035 error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
1036 if (error)
1037 return error;
1038 memset(info, 0, sizeof(*info));
1039 info->lo_number = lo->lo_number;
1040 info->lo_device = huge_encode_dev(stat.dev);
1041 info->lo_inode = stat.ino;
1042 info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
1043 info->lo_offset = lo->lo_offset;
1044 info->lo_sizelimit = lo->lo_sizelimit;
1045 info->lo_flags = lo->lo_flags;
1046 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1047 memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1048 info->lo_encrypt_type =
1049 lo->lo_encryption ? lo->lo_encryption->number : 0;
1050 if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1051 info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1052 memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1053 lo->lo_encrypt_key_size);
1055 return 0;
1058 static void
1059 loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1061 memset(info64, 0, sizeof(*info64));
1062 info64->lo_number = info->lo_number;
1063 info64->lo_device = info->lo_device;
1064 info64->lo_inode = info->lo_inode;
1065 info64->lo_rdevice = info->lo_rdevice;
1066 info64->lo_offset = info->lo_offset;
1067 info64->lo_sizelimit = 0;
1068 info64->lo_encrypt_type = info->lo_encrypt_type;
1069 info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1070 info64->lo_flags = info->lo_flags;
1071 info64->lo_init[0] = info->lo_init[0];
1072 info64->lo_init[1] = info->lo_init[1];
1073 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1074 memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1075 else
1076 memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1077 memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1080 static int
1081 loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1083 memset(info, 0, sizeof(*info));
1084 info->lo_number = info64->lo_number;
1085 info->lo_device = info64->lo_device;
1086 info->lo_inode = info64->lo_inode;
1087 info->lo_rdevice = info64->lo_rdevice;
1088 info->lo_offset = info64->lo_offset;
1089 info->lo_encrypt_type = info64->lo_encrypt_type;
1090 info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1091 info->lo_flags = info64->lo_flags;
1092 info->lo_init[0] = info64->lo_init[0];
1093 info->lo_init[1] = info64->lo_init[1];
1094 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1095 memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1096 else
1097 memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1098 memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1100 /* error in case values were truncated */
1101 if (info->lo_device != info64->lo_device ||
1102 info->lo_rdevice != info64->lo_rdevice ||
1103 info->lo_inode != info64->lo_inode ||
1104 info->lo_offset != info64->lo_offset)
1105 return -EOVERFLOW;
1107 return 0;
1110 static int
1111 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1113 struct loop_info info;
1114 struct loop_info64 info64;
1116 if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1117 return -EFAULT;
1118 loop_info64_from_old(&info, &info64);
1119 return loop_set_status(lo, &info64);
1122 static int
1123 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1125 struct loop_info64 info64;
1127 if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1128 return -EFAULT;
1129 return loop_set_status(lo, &info64);
1132 static int
1133 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1134 struct loop_info info;
1135 struct loop_info64 info64;
1136 int err = 0;
1138 if (!arg)
1139 err = -EINVAL;
1140 if (!err)
1141 err = loop_get_status(lo, &info64);
1142 if (!err)
1143 err = loop_info64_to_old(&info64, &info);
1144 if (!err && copy_to_user(arg, &info, sizeof(info)))
1145 err = -EFAULT;
1147 return err;
1150 static int
1151 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1152 struct loop_info64 info64;
1153 int err = 0;
1155 if (!arg)
1156 err = -EINVAL;
1157 if (!err)
1158 err = loop_get_status(lo, &info64);
1159 if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1160 err = -EFAULT;
1162 return err;
1165 static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
1167 int err;
1168 sector_t sec;
1169 loff_t sz;
1171 err = -ENXIO;
1172 if (unlikely(lo->lo_state != Lo_bound))
1173 goto out;
1174 err = figure_loop_size(lo);
1175 if (unlikely(err))
1176 goto out;
1177 sec = get_capacity(lo->lo_disk);
1178 /* the width of sector_t may be narrow for bit-shift */
1179 sz = sec;
1180 sz <<= 9;
1181 mutex_lock(&bdev->bd_mutex);
1182 bd_set_size(bdev, sz);
1183 /* let user-space know about the new size */
1184 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1185 mutex_unlock(&bdev->bd_mutex);
1187 out:
1188 return err;
/*
 * Per-device ioctl dispatcher for a /dev/loopN node.
 *
 * Every command runs under lo->lo_ctl_mutex.  The mutex is taken with
 * lockdep subclass 1 — presumably to avoid false positives when loop
 * devices are stacked on each other; confirm against lockdep annotations
 * elsewhere in the driver.  LOOP_CLR_FD is special: on success
 * loop_clr_fd() has already dropped the mutex, so we must not unlock
 * again (hence the jump to out_unlocked).
 */
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
	switch (cmd) {
	case LOOP_SET_FD:
		err = loop_set_fd(lo, mode, bdev, arg);
		break;
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		break;
	case LOOP_CLR_FD:
		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
		err = loop_clr_fd(lo, bdev);
		if (!err)
			goto out_unlocked;
		break;
	case LOOP_SET_STATUS:
		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_SET_STATUS64:
		err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_SET_CAPACITY:
		/* allowed for writers on the device or CAP_SYS_ADMIN */
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_capacity(lo, bdev);
		break;
	default:
		/* fall back to the transfer module's ioctl hook, if any */
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&lo->lo_ctl_mutex);

out_unlocked:
	return err;
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit layout of struct loop_info, used to translate the legacy
 * LOOP_SET_STATUS/LOOP_GET_STATUS ioctls issued by 32-bit userspace
 * on a 64-bit kernel.
 */
struct compat_loop_info {
	compat_int_t	lo_number;      /* ioctl r/o */
	compat_dev_t	lo_device;      /* ioctl r/o */
	compat_ulong_t	lo_inode;       /* ioctl r/o */
	compat_dev_t	lo_rdevice;     /* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
	compat_int_t	lo_flags;       /* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
	char		reserved[4];
};
/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	/* the 32-bit structure has no sizelimit field */
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	/* for cryptoapi transfers, lo_name carries the cipher name */
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}
/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	/* mirror the naming convention used by loop_info64_from_compat() */
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
1327 static int
1328 loop_set_status_compat(struct loop_device *lo,
1329 const struct compat_loop_info __user *arg)
1331 struct loop_info64 info64;
1332 int ret;
1334 ret = loop_info64_from_compat(arg, &info64);
1335 if (ret < 0)
1336 return ret;
1337 return loop_set_status(lo, &info64);
1340 static int
1341 loop_get_status_compat(struct loop_device *lo,
1342 struct compat_loop_info __user *arg)
1344 struct loop_info64 info64;
1345 int err = 0;
1347 if (!arg)
1348 err = -EINVAL;
1349 if (!err)
1350 err = loop_get_status(lo, &info64);
1351 if (!err)
1352 err = loop_info64_to_compat(&info64, arg);
1353 return err;
/*
 * compat_ioctl entry point: translate 32-bit ioctls and forward to
 * lo_ioctl() where the layouts match.
 */
static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		/* these take a user pointer: needs compat_ptr() translation */
		arg = (unsigned long) compat_ptr(arg);
		/* fall through */
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
		/* fd arguments are plain ints: pass through unchanged */
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
1390 #endif
/*
 * Block device open.  loop_index_mutex serialises against concurrent
 * device removal: LOOP_CTL_REMOVE clears disk->private_data before
 * freeing the device, which is why the NULL check below is needed.
 * The open count (lo_refcnt) itself is protected by lo_ctl_mutex.
 */
static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo;
	int err = 0;

	mutex_lock(&loop_index_mutex);
	lo = bdev->bd_disk->private_data;
	if (!lo) {
		err = -ENXIO;
		goto out;
	}

	mutex_lock(&lo->lo_ctl_mutex);
	lo->lo_refcnt++;
	mutex_unlock(&lo->lo_ctl_mutex);
out:
	mutex_unlock(&loop_index_mutex);
	return err;
}
/*
 * Block device release (last close of one opener).  On the final close
 * of an autoclear device the configuration is torn down; note that
 * loop_clr_fd() drops lo_ctl_mutex itself on success, hence the
 * out_unlocked exit.  Always returns 0 — a failed loop_clr_fd() falls
 * through to the normal unlock path.
 */
static int lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;
	int err;

	mutex_lock(&lo->lo_ctl_mutex);

	if (--lo->lo_refcnt)
		goto out;	/* not the last opener */

	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		err = loop_clr_fd(lo, NULL);
		if (!err)
			goto out_unlocked;
	} else {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		loop_flush(lo);
	}

out:
	mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
	return 0;
}
/* Block-device methods for the /dev/loopN nodes. */
static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};
/*
 * And now the modules code and kernel interface.
 */
/* Number of devices to pre-create at init; also a hard limit when set
 * (see loop_init()).  0 means "use the compiled-in default". */
static int max_loop;
module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1465 int loop_register_transfer(struct loop_func_table *funcs)
1467 unsigned int n = funcs->number;
1469 if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1470 return -EINVAL;
1471 xfer_funcs[n] = funcs;
1472 return 0;
1475 static int unregister_transfer_cb(int id, void *ptr, void *data)
1477 struct loop_device *lo = ptr;
1478 struct loop_func_table *xfer = data;
1480 mutex_lock(&lo->lo_ctl_mutex);
1481 if (lo->lo_encryption == xfer)
1482 loop_release_xfer(lo);
1483 mutex_unlock(&lo->lo_ctl_mutex);
1484 return 0;
1487 int loop_unregister_transfer(int number)
1489 unsigned int n = number;
1490 struct loop_func_table *xfer;
1492 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1493 return -EINVAL;
1495 xfer_funcs[n] = NULL;
1496 idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1497 return 0;
1500 EXPORT_SYMBOL(loop_register_transfer);
1501 EXPORT_SYMBOL(loop_unregister_transfer);
1503 static int loop_add(struct loop_device **l, int i)
1505 struct loop_device *lo;
1506 struct gendisk *disk;
1507 int err;
1509 lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1510 if (!lo) {
1511 err = -ENOMEM;
1512 goto out;
1515 err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
1516 if (err < 0)
1517 goto out_free_dev;
1519 if (i >= 0) {
1520 int m;
1522 /* create specific i in the index */
1523 err = idr_get_new_above(&loop_index_idr, lo, i, &m);
1524 if (err >= 0 && i != m) {
1525 idr_remove(&loop_index_idr, m);
1526 err = -EEXIST;
1528 } else if (i == -1) {
1529 int m;
1531 /* get next free nr */
1532 err = idr_get_new(&loop_index_idr, lo, &m);
1533 if (err >= 0)
1534 i = m;
1535 } else {
1536 err = -EINVAL;
1538 if (err < 0)
1539 goto out_free_dev;
1541 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1542 if (!lo->lo_queue)
1543 goto out_free_dev;
1545 disk = lo->lo_disk = alloc_disk(1 << part_shift);
1546 if (!disk)
1547 goto out_free_queue;
1549 mutex_init(&lo->lo_ctl_mutex);
1550 lo->lo_number = i;
1551 lo->lo_thread = NULL;
1552 init_waitqueue_head(&lo->lo_event);
1553 spin_lock_init(&lo->lo_lock);
1554 disk->major = LOOP_MAJOR;
1555 disk->first_minor = i << part_shift;
1556 disk->fops = &lo_fops;
1557 disk->private_data = lo;
1558 disk->queue = lo->lo_queue;
1559 sprintf(disk->disk_name, "loop%d", i);
1560 add_disk(disk);
1561 *l = lo;
1562 return lo->lo_number;
1564 out_free_queue:
1565 blk_cleanup_queue(lo->lo_queue);
1566 out_free_dev:
1567 kfree(lo);
1568 out:
1569 return err;
/*
 * Tear down a device created by loop_add(): unregister the gendisk,
 * release its request queue and free the loop_device.  Callers remove
 * the device from loop_index_idr first (see LOOP_CTL_REMOVE and
 * loop_exit()) so it is no longer reachable.
 */
static void loop_remove(struct loop_device *lo)
{
	del_gendisk(lo->lo_disk);
	blk_cleanup_queue(lo->lo_queue);
	put_disk(lo->lo_disk);
	kfree(lo);
}
1580 static int find_free_cb(int id, void *ptr, void *data)
1582 struct loop_device *lo = ptr;
1583 struct loop_device **l = data;
1585 if (lo->lo_state == Lo_unbound) {
1586 *l = lo;
1587 return 1;
1589 return 0;
1592 static int loop_lookup(struct loop_device **l, int i)
1594 struct loop_device *lo;
1595 int ret = -ENODEV;
1597 if (i < 0) {
1598 int err;
1600 err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
1601 if (err == 1) {
1602 *l = lo;
1603 ret = lo->lo_number;
1605 goto out;
1608 /* lookup and return a specific i */
1609 lo = idr_find(&loop_index_idr, i);
1610 if (lo) {
1611 *l = lo;
1612 ret = lo->lo_number;
1614 out:
1615 return ret;
/*
 * blk_register_region() probe callback: invoked when a loop minor with
 * no gendisk yet is accessed.  Looks up the device for that minor and
 * creates it on demand if it does not exist, returning a reference to
 * its disk kobject (or ERR_PTR on failure).
 */
static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
	struct loop_device *lo;
	struct kobject *kobj;
	int err;

	mutex_lock(&loop_index_mutex);
	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		err = loop_add(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		kobj = ERR_PTR(err);
	else
		kobj = get_disk(lo->lo_disk);
	mutex_unlock(&loop_index_mutex);

	*part = 0;	/* whole-disk access; partitions handled by caller */
	return kobj;
}
/*
 * ioctl handler for /dev/loop-control: LOOP_CTL_ADD, LOOP_CTL_REMOVE and
 * LOOP_CTL_GET_FREE.  loop_index_mutex protects the device index for the
 * whole operation; per-device checks nest the device's lo_ctl_mutex
 * inside it.
 */
static long loop_control_ioctl(struct file *file, unsigned int cmd,
			       unsigned long parm)
{
	struct loop_device *lo;
	int ret = -ENOSYS;

	mutex_lock(&loop_index_mutex);
	switch (cmd) {
	case LOOP_CTL_ADD:
		/* fail if the requested number already exists */
		ret = loop_lookup(&lo, parm);
		if (ret >= 0) {
			ret = -EEXIST;
			break;
		}
		ret = loop_add(&lo, parm);
		break;
	case LOOP_CTL_REMOVE:
		ret = loop_lookup(&lo, parm);
		if (ret < 0)
			break;
		mutex_lock(&lo->lo_ctl_mutex);
		if (lo->lo_state != Lo_unbound) {
			/* still has a backing file attached */
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		if (lo->lo_refcnt > 0) {
			/* someone still has the device node open */
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		/* detach from the gendisk so lo_open() now sees -ENXIO */
		lo->lo_disk->private_data = NULL;
		mutex_unlock(&lo->lo_ctl_mutex);
		idr_remove(&loop_index_idr, lo->lo_number);
		loop_remove(lo);
		break;
	case LOOP_CTL_GET_FREE:
		/* reuse an unbound device if one exists, else create one */
		ret = loop_lookup(&lo, -1);
		if (ret >= 0)
			break;
		ret = loop_add(&lo, -1);
	}
	mutex_unlock(&loop_index_mutex);

	return ret;
}
/*
 * The /dev/loop-control management node.  The same ioctl handler serves
 * 32-bit callers via .compat_ioctl: the LOOP_CTL_* arguments are plain
 * integers, so no pointer translation is needed.
 */
static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");
1702 static int __init loop_init(void)
1704 int i, nr;
1705 unsigned long range;
1706 struct loop_device *lo;
1707 int err;
1709 err = misc_register(&loop_misc);
1710 if (err < 0)
1711 return err;
1713 part_shift = 0;
1714 if (max_part > 0) {
1715 part_shift = fls(max_part);
1718 * Adjust max_part according to part_shift as it is exported
1719 * to user space so that user can decide correct minor number
1720 * if [s]he want to create more devices.
1722 * Note that -1 is required because partition 0 is reserved
1723 * for the whole disk.
1725 max_part = (1UL << part_shift) - 1;
1728 if ((1UL << part_shift) > DISK_MAX_PARTS)
1729 return -EINVAL;
1731 if (max_loop > 1UL << (MINORBITS - part_shift))
1732 return -EINVAL;
1735 * If max_loop is specified, create that many devices upfront.
1736 * This also becomes a hard limit. If max_loop is not specified,
1737 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1738 * init time. Loop devices can be requested on-demand with the
1739 * /dev/loop-control interface, or be instantiated by accessing
1740 * a 'dead' device node.
1742 if (max_loop) {
1743 nr = max_loop;
1744 range = max_loop << part_shift;
1745 } else {
1746 nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1747 range = 1UL << MINORBITS;
1750 if (register_blkdev(LOOP_MAJOR, "loop"))
1751 return -EIO;
1753 blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
1754 THIS_MODULE, loop_probe, NULL, NULL);
1756 /* pre-create number of devices given by config or max_loop */
1757 mutex_lock(&loop_index_mutex);
1758 for (i = 0; i < nr; i++)
1759 loop_add(&lo, i);
1760 mutex_unlock(&loop_index_mutex);
1762 printk(KERN_INFO "loop: module loaded\n");
1763 return 0;
/*
 * idr_for_each() callback used at module exit: destroy every remaining
 * loop device.  Always returns 0 so the whole index is walked.
 */
static int loop_exit_cb(int id, void *ptr, void *data)
{
	loop_remove(ptr);
	return 0;
}
/*
 * Module teardown: destroy all remaining devices and the idr index,
 * release the block major/region and unregister /dev/loop-control.
 * The region size must mirror the computation in loop_init().
 */
static void __exit loop_exit(void)
{
	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	/* tear down every remaining device, then the index itself */
	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_remove_all(&loop_index_idr);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);
}

module_init(loop_init);
module_exit(loop_exit);
#ifndef MODULE
/*
 * Built-in only: parse "max_loop=N" from the kernel command line
 * (modular builds get this via module_param above).
 */
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;	/* consumed the option */
}

__setup("max_loop=", max_loop_setup);
#endif