/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */
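/*
 * Typical usage (a rough sketch; the exact knobs depend on the kernel
 * version): load the module, size the device via the disksize module
 * parameter or the per-device sysfs attribute, then use /dev/zram<id>
 * as a swap device (mkswap + swapon) or format and mount it like any
 * other block device.
 */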
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
static struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;
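/*
 * Stat helpers. The 64-bit counters go through stat64_lock since they
 * cannot be updated atomically on 32-bit architectures.
 */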
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}
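/*
 * Release whatever backs the slot at @index (an xvmalloc object, a whole
 * page for incompressible data, or nothing for a zero-filled page) and
 * update the bookkeeping stats.
 */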
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}
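/* Serve a read of a zero-filled (or never written) sector: clear the page. */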
static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}
static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}
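/*
 * Read path: for each bio segment, decompress the stored object into the
 * caller's page. Zero-filled slots and reads of never-written sectors are
 * handled by handle_zero_page(); incompressible pages are copied as-is.
 */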
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;
		clen = PAGE_SIZE;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
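/*
 * Write path: compress each bio segment with LZO and stash the result in
 * the xvmalloc pool. Zero-filled pages only get a flag; pages that do not
 * compress below max_zpage_size are stored uncompressed in a whole page.
 */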
static int zram_write(struct zram *zram, struct bio *bio)
{
	int i, ret;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		ret = zram_init_device(zram);
		if (ret)
			goto out;
	}

	zram_stat64_inc(zram, &zram->stats.num_writes);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			index++;
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors which has side effect of hanging the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram_stat64_add(zram, &zram->stats.compr_size, clen);
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}
/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	int ret = 0;
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		ret = zram_read(zram, bio);
		break;

	case WRITE:
		ret = zram_write(zram, bio);
		break;
	}

	return ret;
}
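/*
 * Undo zram_init_device(): free the per-device buffers, drop every stored
 * page, tear down the xvmalloc pool and clear the stats.
 */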
void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u32 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}
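/*
 * Lazily allocate the per-device resources (compression workmem and
 * buffer, page table, xvmalloc pool) on first use. Returns 0 if the
 * device is already initialized.
 */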
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}
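/*
 * Called when the swap layer frees a swap slot backed by this device,
 * so the corresponding compressed page can be released immediately.
 */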
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}
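/*
 * Module init: register the block major, allocate the device array and
 * create one gendisk per device. Everything is rolled back on failure.
 */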
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");