drivers/staging/zram/zram_drv.c
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}
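/*
 * Editorial note: the stat64 helpers above take stat64_lock because a
 * u64 read-modify-write is not atomic on 32-bit architectures; the
 * plain u32 helpers are assumed to rely on caller-side serialization
 * (e.g. zram->lock on the write path) instead.
 */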
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
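/*
 * Editorial note: zram_write() uses this check to avoid storing
 * zero-filled pages at all; such slots only get the ZRAM_ZERO flag
 * set and are reconstructed with memset() on read.
 */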
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}
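/*
 * Example (editorial, assuming default_disksize_perc_ram == 25): on a
 * machine with 1 GiB of RAM and no disksize given, the default works
 * out to 25 * (1 GiB / 100), roughly 256 MiB, which the PAGE_MASK
 * step above then rounds down to a PAGE_SIZE multiple.
 */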
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}
static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}
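/*
 * Editorial summary of the read path below: for each segment of the
 * bio, a slot is handled in one of four ways -- zero-filled slots are
 * memset to zero, never-written slots are left untouched, slots
 * flagged ZRAM_UNCOMPRESSED are copied verbatim, and everything else
 * is LZO-decompressed into the caller's page.
 */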
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
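/*
 * Editorial summary of the write path below: any previous content of
 * the slot is freed first, zero-filled pages are recorded with just a
 * flag, compressible pages are LZO-compressed into the xvmalloc pool,
 * and pages whose compressed size exceeds max_zpage_size are stored
 * as-is in a freshly allocated page (ZRAM_UNCOMPRESSED).
 */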
static int zram_write(struct zram *zram, struct bio *bio)
{
	int i, ret;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		ret = zram_init_device(zram);
		if (ret)
			goto out;
	}

	zram_stat64_inc(zram, &zram->stats.num_writes);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors, which have the side effect of hanging the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

#if 0
		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}
#endif

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram_stat64_add(zram, &zram->stats.compr_size, clen);
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {
		return 0;
	}

	/* I/O request is valid */
	return 1;
}
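/*
 * Example (editorial, assuming 512-byte sectors and 4 KiB pages):
 * SECTORS_PER_PAGE is 8, so a request is valid only when bi_sector is
 * a multiple of 8 and bi_size is a multiple of 4096; e.g. a read at
 * sector 16 of size 8192 passes, while one at sector 3 is rejected.
 */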
/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	int ret = 0;
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		ret = zram_read(zram, bio);
		break;

	case WRITE:
		ret = zram_write(zram, bio);
		break;
	}

	return ret;
}
void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vmalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}
	memset(zram->table, 0, num_pages * sizeof(*zram->table));

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}
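/*
 * Editorial note: ->swap_slot_free_notify below is invoked by the swap
 * core when a swap slot on this device is freed, letting zram drop the
 * compressed copy immediately instead of waiting for an overwrite.
 */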
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

#ifdef CONFIG_SYSFS
	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}
#endif

	zram->init_done = 0;

out:
	return ret;
}
static void destroy_device(struct zram *zram)
{
#ifdef CONFIG_SYSFS
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);
#endif

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
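/*
 * Editorial usage sketch (illustrative, not part of the driver):
 *
 *	modprobe zram num_devices=1
 *	echo $((256 * 1024 * 1024)) > /sys/block/zram0/disksize
 *	mkswap /dev/zram0
 *	swapon /dev/zram0
 *
 * The disksize attribute is assumed here based on the comment in
 * create_device(); the sysfs attribute group itself is defined
 * elsewhere in this tree (zram_disk_attr_group).
 */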