/* drivers/staging/zram/zram_drv.c */

/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */
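/*
 * Typical usage (a sketch, not part of the original file: device names
 * follow the "zram%d" naming in create_device() below, and the disksize
 * attribute is the sysfs node noted near set_capacity()):
 *
 *	modprobe zram num_devices=1
 *	echo $((256 * 1024 * 1024)) > /sys/block/zram0/disksize
 *	mkswap /dev/zram0
 *	swapon /dev/zram0
 */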
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;
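/*
 * Note on locking: the 32-bit stat helpers below do no locking of their
 * own and rely on callers being serialized (most updates happen in the
 * write path, under zram->lock). The 64-bit counters take stat64_lock,
 * since a 64-bit read-modify-write is not atomic on 32-bit machines.
 */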
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}
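/* Return 1 if the page contains only zeros, scanning one word at a time. */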
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}
static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}
static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}
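/*
 * Read path: for each segment of the bio, either fill the page with
 * zeros (zero-filled or never-written index), copy an uncompressed
 * page verbatim, or LZO-decompress the stored object into the page.
 */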
static void zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u\n",
				(ulong)(bio->bi_sector), bio->bi_size);
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
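/*
 * Write path: any previous object at this index is freed first, since
 * the block layer overwrites sectors in place. Zero-filled pages are
 * recorded with just a flag; other pages are LZO-compressed, and pages
 * that do not compress below max_zpage_size are stored uncompressed in
 * a freshly allocated page rather than failing the write.
 */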
static void zram_write(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	zram_stat64_inc(zram, &zram->stats.num_writes);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			index++;
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors, which has the side effect of hanging the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

#if 0
		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}
#endif

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram_stat64_add(zram, &zram->stats.compr_size, clen);
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {
		return 0;
	}

	/* I/O request is valid */
	return 1;
}
/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	if (unlikely(!zram->init_done) && zram_init_device(zram)) {
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		zram_read(zram, bio);
		break;

	case WRITE:
		zram_write(zram, bio);
		break;
	}

	return 0;
}
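/*
 * Free all per-device state: compression scratch memory, every stored
 * page (compressed or not), the mapping table and the xvmalloc pool.
 * Also used as the cleanup path when zram_init_device() fails part way
 * through.
 */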
void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}
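/*
 * Lazy initialization, triggered by the first I/O request (see
 * zram_make_request()): sizes the device, then allocates the LZO
 * working memory, a two-page compression buffer, the page table and
 * the xvmalloc pool.
 */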
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}
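/*
 * Called by the swap layer when a swap slot is freed, letting us drop
 * the stored page immediately instead of waiting for the slot to be
 * overwritten.
 */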
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
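/*
 * Per-device setup: a bio-based queue with zram_make_request() as the
 * handler, a gendisk named zram<id> with zero capacity (the real size
 * is set later via sysfs), PAGE_SIZE I/O geometry so requests stay
 * page aligned, and the sysfs stats group.
 */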
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
			ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group\n");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");