/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>

#include "dm.h"
#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
};
/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
};
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};
#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;
static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
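/*
 * Illustrative sketch (editor's example, not from the original source):
 * for sector 5 and a 16-byte IV, the generators below would produce
 *
 *   plain: 05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *   benbi: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 06
 *          (assuming a 512-byte cipher block, i.e. benbi_shift == 0)
 *   null:  00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *
 * essiv instead encrypts the little-endian 64-bit sector number with
 * the salt-keyed cipher, so its output also depends on the key.
 */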
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}
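/*
 * Worked example (editor's note): for a cipher with 16-byte blocks,
 * bs = 16 and log = 4, so benbi_shift = 9 - 4 = 5. Each 512-byte
 * sector then spans 1 << 5 = 32 narrow blocks, and _gen below maps
 * sector n to the 1-based block count (n << 5) + 1.
 */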
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
			  struct scatterlist *in, unsigned int length,
			  int write, sector_t sector)
{
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in, sg_out;

		sg_init_table(&sg_in, 1);
		sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in);

		sg_init_table(&sg_out, 1);
		sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out);

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      bio_data_dir(ctx->bio_in) == WRITE, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}
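/*
 * Editor's note: a partial bio returned above is not an error. The write
 * path (kcryptd_crypt_write_convert below) keeps calling this function
 * until all remaining data has been handled, backing off with
 * congestion_wait() when the page pool runs dry.
 */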
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);
	mempool_free(io, cc->io_pool);
}
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
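/*
 * Resulting flow, summarized (editor's sketch based on the code below):
 *
 *   READ:  crypt_map -> kcryptd_queue_io -> kcryptd_io_read
 *          -> crypt_endio -> kcryptd_queue_crypt -> decrypt in kcryptd
 *   WRITE: crypt_map -> kcryptd_queue_crypt -> encrypt in kcryptd
 *          -> generic_make_request
 */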
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}
static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}
static void kcryptd_io_write(struct dm_crypt_io *io)
{
}
static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
{
}
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	unsigned remaining = base_bio->bi_size;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &io->ctx, NULL, base_bio, io->sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			crypt_dec_pending(io);
			return;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		if (unlikely(crypt_convert(cc, &io->ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			io->error = -EIO;
			crypt_dec_pending(io);
			return;
		}

		/* crypt_convert should have filled the clone bio */
		BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

		clone->bi_sector = cc->start + io->sector;
		remaining -= clone->bi_size;
		io->sector += bio_sectors(clone);

		/* Grab another reference to the io struct
		 * before we kick off the request */
		if (remaining)
			atomic_inc(&io->pending);

		generic_make_request(clone);

		/* Do not reference clone after this - it
		 * may be gone already. */

		/* out of memory -> run queues */
		if (remaining)
			congestion_wait(WRITE, HZ/100);
	}
}
static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	kcryptd_crypt_read_done(io, r);
}
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}
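/*
 * Example (editor's note): a 128-bit key arrives as 32 hex digits, so
 * key_size = strlen(key) >> 1 = 16 bytes; "4142" decodes to the bytes
 * 0x41 0x42. The single character "-" denotes an empty key.
 */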
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
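/*
 * Example table line (editor's illustration; device and key are made up):
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb 0
 *
 * maps 409600 sectors to /dev/sdb using AES in CBC mode with ESSIV
 * IVs derived via SHA-256.
 */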
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_blkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = bio->bi_sector - ti->begin;
	io->error = 0;
	atomic_set(&io->pending, 0);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}
static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *	key set <key>
 *	key wipe
 */
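/*
 * Example usage (editor's illustration; device name is hypothetical):
 *
 *   dmsetup message cryptdev 0 key wipe
 *   dmsetup message cryptdev 0 key set <newkey>
 *
 * The target must be suspended first, as enforced below.
 */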
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
static struct target_type crypt_target = {
	.name    = "crypt",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = crypt_ctr,
	.dtr     = crypt_dtr,
	.map     = crypt_map,
	.status  = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume  = crypt_resume,
	.message = crypt_message,
};
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}
static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);
MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");