/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};
/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};
struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
struct iv_essiv_private {
	struct crypto_cipher *tfm;
	struct crypto_hash *hash_tfm;
	u8 *salt;
};
struct iv_benbi_private {
	int shift;
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};
#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8
static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
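/*
 * Worked example (editor's illustration, not in the original source):
 * for sector 5 and a 16-byte IV, "plain" yields
 *   05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 * while "essiv" further encrypts that little-endian sector number with
 * the bulk cipher keyed by a hash of the volume key, so sectors with
 * equal plaintext no longer produce related ciphertext.
 */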
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);

	return 0;
}
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
				    crypto_hash_digestsize(essiv->hash_tfm));
}
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);

	memset(essiv->salt, 0, salt_size);

	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_cipher(essiv->tfm);
	essiv->tfm = NULL;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	/* Allocate essiv_tfm */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		err = PTR_ERR(essiv_tfm);
		goto bad;
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);

	return 0;
}
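/*
 * Editor's note (illustration only): the ESSIV generator above computes
 * IV = E_salt(sector), where salt = H(volume key) is derived in
 * crypt_iv_essiv_init(). Keying the IV cipher with a hash of the volume
 * key, rather than the key itself, keeps the per-sector IVs unpredictable
 * to an attacker and defeats watermarking attacks on CBC mode.
 */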
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
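/*
 * Worked example (editor's illustration): with 512-byte sectors and a
 * 16-byte cipher block, crypt_iv_benbi_ctr() stores shift = 9 - 4 = 5,
 * so sector n yields the big-endian block count (n << 5) + 1 in the last
 * 8 bytes of the IV - the 1-based narrow-block count LRW-style modes
 * expect (512 / 16 = 32 = 1 << 5 blocks per sector).
 */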
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}
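/*
 * Editor's sketch of the pointer arithmetic above (illustration only).
 * Each element of req_pool is one contiguous allocation:
 *
 *   | ablkcipher_request | tfm ctx / padding | dm_crypt_request | pad | IV |
 *   ^req                 ^req + cc->dmreq_start
 *
 * so dmreq_of_req() and req_of_dmreq() simply add or subtract the
 * dmreq_start offset precomputed in crypt_ctr().
 */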
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					kcryptd_async_done,
					dmreq_of_req(cc, cc->req));
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}
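/*
 * Editor's note on the pending protocol (illustration only):
 * crypt_convert() starts with pending = 1 for itself and takes one extra
 * reference per in-flight block. A synchronous cipher drops its reference
 * immediately (case 0); an asynchronous one drops it later in
 * kcryptd_async_done(). Whoever brings the count to zero - the caller's
 * final atomic_dec_and_test() or the async callback - completes the io.
 */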
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}
static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
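/*
 * Editor's sketch of the resulting pipelines (illustration only):
 *
 *   READ:  crypt_map -> kcryptd_io (submit clone) -> crypt_endio
 *          -> kcryptd (decrypt in place) -> bio_endio
 *   WRITE: crypt_map -> kcryptd (encrypt into new pages)
 *          -> kcryptd_io / generic_make_request -> crypt_endio -> bio_endio
 */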
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}
static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}
static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}
static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}
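/*
 * Editor's note (illustration): a transform whose internal queue is full
 * returns -EBUSY from crypto_ablkcipher_encrypt/decrypt; once the
 * backlogged request is accepted, the callback above runs with
 * error == -EINPROGRESS and completes ctx->restart so crypt_convert()
 * can resume. Real completions then free the request back to the
 * mempool and drop one pending reference.
 */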
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
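/*
 * Example (editor's illustration): crypt_decode_key(key, "2fae", 2)
 * parses two hex bytes and stores key[0] = 0x2f, key[1] = 0xae. On a
 * non-hex character simple_strtoul() stops early, endp no longer points
 * at the terminating NUL of the two-character buffer, and -EINVAL is
 * returned.
 */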
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_ablkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	if (crypt_set_key(cc, argv[1]) < 0) {
		ti->error = "Error decoding and setting key";
		goto bad_ivmode;
	}

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
	    cc->iv_gen_ops->init(cc) < 0) {
		ti->error = "Error initialising IV";
		goto bad_slab_pool;
	}

	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (cc->iv_gen_ops) {
		DMWARN("Selected cipher does not support IVs");
		if (cc->iv_gen_ops->dtr)
			cc->iv_gen_ops->dtr(cc);
		cc->iv_gen_ops = NULL;
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad_req_pool;
	}
	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->num_flush_requests = 1;
	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->req_pool);
bad_req_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_ablkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	kzfree(cc);
	return -EINVAL;
}
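/*
 * Example usage (editor's illustration, device names hypothetical):
 *
 *   echo "0 409600 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb 0" \
 *       | dmsetup create cryptvol
 *
 * maps 409600 sectors of /dev/sdb as AES-CBC with ESSIV(sha256) IVs,
 * following the <cipher> <key> <iv_offset> <dev_path> <start> layout above.
 */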
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	if (cc->req)
		mempool_free(cc->req, cc->req_pool);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_ablkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	kzfree(cc);
}
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (unlikely(bio_empty_barrier(bio))) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}
static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}
static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 7, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}
static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");