/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;
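/*
 * Stat helpers. The plain 32-bit counters below are bumped without any
 * locking of their own; updates to them are assumed to be serialized by
 * the caller (the per-device mutex on the I/O path). The 64-bit counters
 * go through stat64_lock because a 64-bit read-modify-write is not atomic
 * on 32-bit architectures.
 */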
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}
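/*
 * Each table entry carries a small per-page flags word (see
 * enum zram_pageflags in zram_drv.h): ZRAM_ZERO marks a page that was
 * entirely zero-filled and therefore has no backing allocation at all,
 * and ZRAM_UNCOMPRESSED marks a page stored verbatim because it did not
 * compress well enough.
 */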
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}
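/*
 * Free the backing storage for one table slot. A slot can be in one of
 * three states: zero-filled (flag only, no allocation), uncompressed
 * (a whole page of its own), or a compressed object inside the xvmalloc
 * pool. Each is torn down differently and the stats are adjusted to match.
 */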
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}
static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}
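/*
 * Read path. Each segment of the bio maps to one table slot and is
 * handled according to its stored state: zero-filled slots are satisfied
 * with memset, never-written slots are silently left alone (read before
 * write), uncompressed slots are copied verbatim, and everything else is
 * LZO-decompressed straight into the caller's page.
 */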
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
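/*
 * Write path. The slot is first freed (the block layer is allowed to
 * overwrite live sectors), then the new data is classified: all-zero
 * pages are recorded with just a flag, pages that compress to at most
 * max_zpage_size are stored as xvmalloc objects, and incompressible
 * pages fall back to an uncompressed copy in a freshly allocated page
 * rather than failing the write.
 */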
static int zram_write(struct zram *zram, struct bio *bio)
{
	int i, ret;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		ret = zram_init_device(zram);
		if (ret)
			goto out;
	}

	zram_stat64_inc(zram, &zram->stats.num_writes);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			index++;
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors which has side effect of hanging the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

#if 0
		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}
#endif

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram_stat64_add(zram, &zram->stats.compr_size, clen);
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
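/*
 * create_device() advertises PAGE_SIZE logical and physical block sizes,
 * so a well-behaved block layer should only ever send page-aligned,
 * page-multiple requests; valid_io_request() rejects anything else.
 */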
/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}
/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	int ret = 0;
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		ret = zram_read(zram, bio);
		break;

	case WRITE:
		ret = zram_write(zram, bio);
		break;
	}

	return ret;
}
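/*
 * Tear down all per-device state under init_lock and return the device
 * to its uninitialized form. This is non-static since it is also used by
 * the sysfs handlers in zram_sysfs.c, and it doubles as the failure path
 * of zram_init_device() below.
 */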
void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}
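/*
 * Lazy initialization: this runs on the first write to the device, not
 * at device creation. The compress buffer is two pages (order 1),
 * presumably because LZO's worst-case output is slightly larger than its
 * input, so a "compressed" page can spill past PAGE_SIZE.
 */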
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
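/*
 * Per-device setup: allocate the request queue and gendisk, install
 * zram_make_request() as the bio handler, and declare PAGE_SIZE block
 * sizes so the block layer hands us whole pages. The sysfs attribute
 * group registered below is defined in zram_sysfs.c.
 */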
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

#ifdef CONFIG_SYSFS
	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}
#endif

	zram->init_done = 0;

out:
	return ret;
}
static void destroy_device(struct zram *zram)
{
#ifdef CONFIG_SYSFS
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);
#endif

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
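/*
 * Example usage from userspace, a rough sketch based on the interfaces
 * above (the device name comes from the disk_name set in create_device(),
 * and the capacity is set via the sysfs disksize attribute):
 *
 *	modprobe zram num_devices=1
 *	echo 268435456 > /sys/block/zram0/disksize
 *	mkswap /dev/zram0
 *	swapon /dev/zram0
 *
 * Reads and writes then flow through zram_make_request(), and the swap
 * layer's slot-free notifications arrive via zram_slot_free_notify().
 */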