/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>

#include "dm.h"
#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
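/*
 * For illustration (not in the original source): MESG_STR("key") expands
 * to the argument pair '"key", 4' (sizeof counts the trailing NUL), so the
 * strnicmp() calls in crypt_message() below match whole words only.
 */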
/*
 * per bio private data
 */
struct crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
	int post_process;
};
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static kmem_cache_t *_crypt_io_pool;
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
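/*
 * For illustration: with a 16 byte IV, sector 0x1234 produces the IV
 * 34 12 00 00 00 00 ... (32-bit little-endian sector number, zero padded).
 * Sector numbers beyond 32 bits wrap because of the & 0xffffffff.
 */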
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_set_buf(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}
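/*
 * For illustration: with "essiv:sha256" and an aes bulk cipher, the salt
 * is sha256(key) (32 bytes, so the ESSIV cipher effectively runs AES-256)
 * and each IV is E_salt(little-endian sector number, zero padded), so
 * unlike plain the IVs are not predictable without the volume key.
 */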
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = long_log2(bs);

	/*
	 * we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen
	 */
	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* the last u64 is set below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
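/*
 * For illustration: with a 16 byte wide block cipher, log2(16) = 4 and
 * benbi_shift = 9 - 4 = 5, so sector 3 yields the block count
 * (3 << 5) + 1 = 97, stored big-endian in the last 8 bytes of the IV.
 */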
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
			  struct scatterlist *in, unsigned int length,
			  int write, sector_t sector)
{
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}
static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
		   struct bio *bio_out, struct bio *bio_in,
		   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}
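/*
 * For illustration: each 512 byte sector is converted independently with
 * an IV derived from ctx->sector, so a partially converted bio can be
 * resumed later simply by keeping the convert_context around.
 */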
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *
crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
		   struct bio *base_bio, unsigned int *bio_vec_idx)
{
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	if (base_bio) {
		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
		__bio_clone(clone, base_bio);
	} else
		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);

	if (!clone)
		return NULL;

	clone->bi_destructor = dm_crypt_bio_destructor;

	/* if the last bio was not complete, continue where that one ended */
	clone->bi_idx = *bio_vec_idx;
	clone->bi_vcnt = *bio_vec_idx;
	clone->bi_size = 0;
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	/* clone->bi_idx pages have already been allocated */
	size -= clone->bi_idx * PAGE_SIZE;

	for (i = clone->bi_idx; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(clone, i);

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

		clone->bi_size += bv->bv_len;
		clone->bi_vcnt++;
		size -= bv->bv_len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	/*
	 * Remember the last bio_vec allocated to be able
	 * to correctly continue after the splitting.
	 */
	*bio_vec_idx = clone->bi_vcnt;

	return clone;
}
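/*
 * For illustration: with 4 KiB pages, a 64 KiB write needs 16 pages; the
 * first MIN_BIO_PAGES (8) are allocated with a blocking gfp_mask, the
 * rest opportunistically, so the clone may cover only part of the bio
 * and process_write() below loops until the remainder is handled.
 */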
static void crypt_free_buffer_pages(struct crypt_config *cc,
				    struct bio *clone, unsigned int bytes)
{
	unsigned int i, start, end;
	struct bio_vec *bv;

	/*
	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
	 * endio function is too dangerous at the moment, so I calculate the
	 * correct position using bi_vcnt and bi_size.
	 * The bv_offset and bv_len fields might already be modified but we
	 * know that we always allocated whole pages.
	 * A fix to the bi_idx issue in the kernel is in the works, so
	 * we will hopefully be able to revert to the cleaner solution soon.
	 */
	i = clone->bi_vcnt - 1;
	bv = bio_iovec_idx(clone, i);
	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
	start = end - bytes;

	start >>= PAGE_SHIFT;
	if (!clone->bi_size)
		end = clone->bi_vcnt;
	else
		end >>= PAGE_SHIFT;

	for (i = start; i < end; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	if (io->first_clone)
		bio_put(io->first_clone);

	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);

	mempool_free(io, cc->io_pool);
}
/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 */
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work);

static void kcryptd_queue_io(struct crypt_io *io)
{
	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(_kcryptd_workqueue, &io->work);
}
static int crypt_endio(struct bio *clone, unsigned int done, int error)
{
	struct crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	/*
	 * free the processed pages, even if
	 * it's only a partially completed write
	 */
	if (!read_io)
		crypt_free_buffer_pages(cc, clone, done);

	/* keep going - not finished yet */
	if (unlikely(clone->bi_size))
		return 1;

	if (!read_io)
		goto out;

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
		error = -EIO;
		goto out;
	}

	bio_put(clone);
	io->post_process = 1;
	kcryptd_queue_io(io);
	return 0;

out:
	bio_put(clone);
	dec_pending(io, error);
	return error;
}
static void clone_init(struct crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}
static void process_read(struct crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		dec_pending(io, -ENOMEM);
		return;
	}

	clone_init(io, clone);
	clone->bi_destructor = dm_crypt_bio_destructor;
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}
static void process_write(struct crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;
	unsigned bvec_idx = 0;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
					   io->first_clone, &bvec_idx);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}

		ctx.bio_out = clone;

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone, clone->bi_size);
			bio_put(clone);
			dec_pending(io, -EIO);
			return;
		}

		clone_init(io, clone);
		clone->bi_sector = cc->start + sector;

		if (!io->first_clone) {
			/*
			 * hold a reference to the first clone, because it
			 * holds the bio_vec array and that can't be freed
			 * before all other clones are released
			 */
			bio_get(clone);
			io->first_clone = clone;
		}

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		/* prevent bio_put of first_clone */
		if (remaining)
			atomic_inc(&io->pending);

		generic_make_request(clone);

		/* out of memory -> run queues */
		if (remaining)
			congestion_wait(bio_data_dir(clone), HZ/100);
	}
}
static void process_read_endio(struct crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct convert_context ctx;

	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);

	dec_pending(io, crypt_convert(cc, &ctx));
}
static void kcryptd_do_work(struct work_struct *work)
{
	struct crypt_io *io = container_of(work, struct crypt_io, work);

	if (io->post_process)
		process_read_endio(io);
	else if (bio_data_dir(io->base_bio) == READ)
		process_read(io);
	else
		process_write(io);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
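/*
 * For illustration: crypt_decode_key(key, "2f00ff", 3) yields
 * { 0x2f, 0x00, 0xff }; an invalid digit such as in "2g" makes
 * simple_strtoul() stop before buffer[2] and the key is rejected.
 */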
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
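/*
 * For illustration (device and volume names are made up), a matching
 * dmsetup table line is "<logical start> <size in sectors> crypt"
 * followed by the five arguments above:
 *
 *   echo "0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb1 0" |
 *           dmsetup create cryptvol
 */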
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad1;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad1;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (cc->iv_gen_ops) {
		DMWARN("Selected cipher does not support IVs");
		if (cc->iv_gen_ops->dtr)
			cc->iv_gen_ops->dtr(cc);
		cc->iv_gen_ops = NULL;
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad4;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad5;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad5;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_blkcipher(tfm);
bad1:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->first_clone = NULL;
	io->error = io->post_process = 0;
	atomic_set(&io->pending, 0);
	kcryptd_queue_io(io);

	return 0;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}
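/*
 * For illustration (same made-up mapping as above), STATUSTYPE_TABLE
 * emits something like "aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb1 0",
 * i.e. exactly the constructor arguments, so a table can be reloaded
 * from the status output.
 */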
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}
static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/*
 * Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
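/*
 * For illustration (device name is made up), driving the message
 * interface from userspace; the device must be suspended first:
 *
 *   dmsetup suspend cryptvol
 *   dmsetup message cryptvol 0 key wipe
 *   dmsetup message cryptvol 0 key set <new hex key>
 *   dmsetup resume cryptvol
 */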
static struct target_type crypt_target = {
	.name        = "crypt",
	.version     = {1, 3, 0},
	.module      = THIS_MODULE,
	.ctr         = crypt_ctr,
	.dtr         = crypt_dtr,
	.map         = crypt_map,
	.status      = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume   = crypt_preresume,
	.resume      = crypt_resume,
	.message     = crypt_message,
};
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
					   sizeof(struct crypt_io),
					   0, 0, NULL, NULL);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR("couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}
static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");