/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>
#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
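/*
 * Illustrative note (added for clarity, not part of the original file):
 * MESG_STR bundles a string literal with its size, so a call such as
 * strnicmp(argv[0], MESG_STR("key")) expands to
 * strnicmp(argv[0], "key", sizeof("key")), i.e. a 4-byte comparison
 * that includes the terminating NUL.
 */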
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
};
struct dm_crypt_request {
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *    struct ablkcipher_request
	 *       context
	 *       padding
	 *    struct dm_crypt_request
	 *       padding
	 *    IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};
#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
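/*
 * Worked example (added for clarity, not part of the original file):
 * for sector 5 and a 16-byte IV, "plain" produces
 *   05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 * (the 32-bit little-endian sector number, zero padded); "essiv"
 * instead encrypts the 64-bit sector number with a cipher keyed by a
 * hash of the bulk key, so identical sector numbers still yield
 * unpredictable IVs; "benbi" stores a big-endian block count in the
 * last eight bytes of the IV.
 */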
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}
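/*
 * Worked example (added for clarity, not part of the original file):
 * for a 16-byte cipher block, log = ilog2(16) = 4 and benbi_shift =
 * 9 - 4 = 5, so each 512-byte sector holds 2^5 = 32 narrow blocks and
 * crypt_iv_benbi_gen() numbers the blocks of sector N starting at
 * (N << 5) + 1.
 */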
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	return 0;
}
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);

	/*
	 * Crypto operations can be asynchronous,
	 * ctx->pending is increased after request submission.
	 * We need to ensure that we don't call the crypt finish
	 * operation before pending got incremented
	 * (dependent on the crypt submission return code).
	 */
	atomic_set(&ctx->pending, 2);
}
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					kcryptd_async_done, ctx);
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			atomic_inc(&ctx->pending);
			cc->req = NULL;
			r = 0;
			/* fall through */
		case 0:
			ctx->sector++;
			continue;
		}

		break;
	}

	/*
	 * If there are pending crypto operations, run the async
	 * code.  Otherwise process the return code synchronously.
	 * The step of 2 ensures that the async finish doesn't
	 * call the crypt finish too early.
	 */
	if (atomic_sub_return(2, &ctx->pending))
		return -EINPROGRESS;

	return r;
}
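/*
 * Illustrative note (added for clarity, not part of the original file):
 * ctx->pending starts at 2 in crypt_convert_init() as a bias held by
 * crypt_convert() itself; every request that goes asynchronous adds 1,
 * and every async completion in kcryptd_async_done() subtracts 1.  The
 * final atomic_sub_return(2, ...) drops the bias, so a nonzero result
 * means completions are still outstanding and the last completion (not
 * this function) triggers the finish path.
 */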
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}
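/*
 * Illustrative note (added for clarity, not part of the original file):
 * the first MIN_BIO_PAGES (8) pages may block in mempool_alloc() and
 * are therefore guaranteed; later pages are requested with ~__GFP_WAIT,
 * so a 64 KiB request may legitimately come back as a smaller (e.g.
 * 32 KiB) bio that kcryptd_crypt_write_convert_loop() finishes in a
 * further iteration.
 */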
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);
	mempool_free(io, cc->io_pool);
}
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}
static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}
static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}
static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;
	io->sector += bio_sectors(clone);

	if (async)
		kcryptd_queue_io(io);
	else {
		atomic_inc(&io->pending);
		generic_make_request(clone);
	}
}
static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned remaining = io->base_bio->bi_size;
	int r;

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			return;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;

		r = crypt_convert(cc, &io->ctx);

		if (r != -EINPROGRESS) {
			kcryptd_crypt_write_io_submit(io, r, 0);
			if (unlikely(r < 0))
				return;
		} else
			atomic_inc(&io->pending);

		/* out of memory -> run queues */
		if (unlikely(remaining))
			congestion_wait(WRITE, HZ/100);
	}
}
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	atomic_inc(&io->pending);

	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
	kcryptd_crypt_write_convert_loop(io);

	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (r != -EINPROGRESS)
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct convert_context *ctx = async_req->data;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
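/*
 * Worked example (added for clarity, not part of the original file):
 * crypt_decode_key(key, "2a0f", 2) copies "2a" and then "0f" into the
 * 3-byte buffer and parses each pair with simple_strtoul(..., 16),
 * storing key[0] = 0x2a and key[1] = 0x0f.  A non-hex digit, as in
 * "2g0f", stops endp short of &buffer[2] and returns -EINVAL.
 */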
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
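/*
 * Illustrative example (added for clarity, not part of the original
 * file): a dmsetup table line for this target could look like
 *   0 2097152 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb 0
 * which maps a 1 GiB range with cipher "aes", chainmode "cbc", ivmode
 * "essiv" with ivopts "sha256", iv_offset 0, starting at sector 0 of
 * /dev/sdb.
 */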
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_ablkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}
	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;
	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}
	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}
	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;
	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;
	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (cc->iv_gen_ops) {
		DMWARN("Selected cipher does not support IVs");
		if (cc->iv_gen_ops->dtr)
			cc->iv_gen_ops->dtr(cc);
		cc->iv_gen_ops = NULL;
	}
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}
	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);
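	/*
	 * Worked example (added for clarity; the sizes below are made
	 * up): assuming sizeof(struct ablkcipher_request) == 64, a tfm
	 * reqsize of 32, an 8-byte crypto_tfm_ctx_alignment() and an
	 * alignmask of 15, dmreq_start = ALIGN(64 + 32, 8) + (15 & ~7)
	 * = 96 + 8 = 104, so struct dm_crypt_request sits at offset 104
	 * of each mempool element and the IV follows it, re-aligned in
	 * crypt_convert_block().
	 */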
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad_req_pool;
	}
	cc->req = NULL;
	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}
	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}
	if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;
	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}
	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;
	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}
	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->req_pool);
bad_req_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_ablkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	if (cc->req)
		mempool_free(cc->req, cc->req_pool);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_ablkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = bio->bi_sector - ti->begin;
	io->error = 0;
	atomic_set(&io->pending, 0);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}
*ti
)
1184 struct crypt_config
*cc
= ti
->private;
1186 clear_bit(DM_CRYPT_SUSPENDED
, &cc
->flags
);
/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
static struct target_type crypt_target = {
	.name        = "crypt",
	.version     = {1, 5, 0},
	.module      = THIS_MODULE,
	.ctr         = crypt_ctr,
	.dtr         = crypt_dtr,
	.map         = crypt_map,
	.status      = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume   = crypt_preresume,
	.resume      = crypt_resume,
	.message     = crypt_message,
};
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}
static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);
MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");