/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;

static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

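/*
 * Choose the disk size: keep a user-supplied value, or default to
 * default_disksize_perc_ram percent of total RAM, and warn when the
 * chosen size exceeds twice the amount of RAM.
 */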
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}

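/*
 * Release whatever backs a table entry: nothing for zero-filled pages, the
 * whole page for uncompressed entries, or the xvmalloc object for
 * compressed ones, and adjust the statistics accordingly.
 */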
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}

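/* Serve a read of a zero-filled page by simply clearing the target page. */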
static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

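/* Copy an incompressible page, stored as-is, straight into the bio page. */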
static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}

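/*
 * Read handler: for each bio segment, clear the page (ZRAM_ZERO), copy it
 * verbatim (ZRAM_UNCOMPRESSED), or LZO-decompress the stored object into
 * the destination page.
 */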
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}

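/*
 * Write handler: free any previous content of the sector, then either mark
 * the page zero-filled, store it uncompressed when LZO cannot shrink it
 * below max_zpage_size, or place the compressed data in the xvmalloc pool.
 */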
static int zram_write(struct zram *zram, struct bio *bio)
{
	int i, ret;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		ret = zram_init_device(zram);
		if (ret)
			goto out;
	}

	zram_stat64_inc(zram, &zram->stats.num_writes);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			index++;
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors which has side effect of hanging the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram_stat64_add(zram, &zram->stats.compr_size, clen);
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}

/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	int ret = 0;
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		ret = zram_read(zram, bio);
		break;

	case WRITE:
		ret = zram_write(zram, bio);
		break;
	}

	return ret;
}

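/*
 * Tear down the device's runtime state: free the compression buffers,
 * every stored page, the table and the memory pool, and clear all stats.
 */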
void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}

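/*
 * Allocate the per-device resources (compression workspace, page table and
 * memory pool) on first use; failures roll back through zram_reset_device().
 */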
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vmalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}
	memset(zram->table, 0, num_pages * sizeof(*zram->table));

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resembles non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

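/*
 * Set up one zram block device: request queue, gendisk, PAGE_SIZE-granular
 * I/O limits and the sysfs attribute group.
 */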
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");