/*
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "padlock.h"	/* PFX, PADLOCK_ALIGNMENT, PADLOCK_CRA_PRIORITY */
#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
#define SHA1_DIGEST_SIZE	20
#define SHA1_HMAC_BLOCK_SIZE	64

#define SHA256_DEFAULT_FALLBACK	"sha256-generic"
#define SHA256_DIGEST_SIZE	32
#define SHA256_HMAC_BLOCK_SIZE	64
struct padlock_sha_ctx {
	char		*data;
	size_t		used;
	int		bypass;
	void (*f_sha_padlock)(const char *in, char *out, int count);
	struct hash_desc fallback;
};
static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx(tfm);
}
/* We'll need aligned address on the stack */
#define NEAREST_ALIGNED(ptr) \
	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
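/*
 * PADLOCK_ALIGNMENT comes from the shared padlock.h header; the engine
 * wants its state buffer on a 16-byte boundary (an assumption based on
 * that header's definition), hence the rounding above.
 */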
static struct crypto_alg sha1_alg, sha256_alg;
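/*
 * Buffering strategy: input is accumulated into a single page and handed
 * to the PadLock engine in one shot at final time, since the engine (as
 * driven here) hashes a complete message per invocation.  Once the input
 * outgrows that page, we "bypass" to a software fallback hash and replay
 * the bytes buffered so far.
 */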
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->bypass)
		return;

	crypto_hash_init(&ctx(tfm)->fallback);
	if (ctx(tfm)->data && ctx(tfm)->used) {
		struct scatterlist sg;

		/* Replay everything buffered so far into the fallback. */
		sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
	}

	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 1;
}
static void padlock_sha_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 0;
}
static void padlock_sha_update(struct crypto_tfm *tfm,
			const uint8_t *data, unsigned int length)
{
	/* Our buffer is always one page. */
	if (unlikely(!ctx(tfm)->bypass &&
			(ctx(tfm)->used + length > PAGE_SIZE)))
		padlock_sha_bypass(tfm);

	if (unlikely(ctx(tfm)->bypass)) {
		struct scatterlist sg;
		sg_set_buf(&sg, (uint8_t *)data, length);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
		return;
	}

	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
	ctx(tfm)->used += length;
}
/*
 * The engine keeps its state as little-endian 32-bit words; the digest
 * layout the crypto API expects is big-endian, hence the byte swap.
 */
static inline void padlock_output_block(uint32_t *src,
			uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}
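/*
 * "rep xsha1"/"rep xsha256" are emitted as raw opcode bytes because older
 * assemblers don't know the mnemonics.  As used below: ESI points at the
 * message, EDI at the hash state, ECX holds the byte count, and EAX = 0
 * asks the engine to hash the full message from scratch, including the
 * final padding.  The EAX semantics are an assumption inferred from how
 * this driver invokes the instruction, not a documented contract here.
 */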
static void padlock_do_sha1(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + 16];
	char *result = NEAREST_ALIGNED(buf);

	/* Standard SHA-1 initial hash values (FIPS 180-2). */
	((uint32_t *)result)[0] = 0x67452301;
	((uint32_t *)result)[1] = 0xEFCDAB89;
	((uint32_t *)result)[2] = 0x98BADCFE;
	((uint32_t *)result)[3] = 0x10325476;
	((uint32_t *)result)[4] = 0xC3D2E1F0;

	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}
static void padlock_do_sha256(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + 16];
	char *result = NEAREST_ALIGNED(buf);

	/* Standard SHA-256 initial hash values (FIPS 180-2). */
	((uint32_t *)result)[0] = 0x6A09E667;
	((uint32_t *)result)[1] = 0xBB67AE85;
	((uint32_t *)result)[2] = 0x3C6EF372;
	((uint32_t *)result)[3] = 0xA54FF53A;
	((uint32_t *)result)[4] = 0x510E527F;
	((uint32_t *)result)[5] = 0x9B05688C;
	((uint32_t *)result)[6] = 0x1F83D9AB;
	((uint32_t *)result)[7] = 0x5BE0CD19;

	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}
static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
	if (unlikely(ctx(tfm)->bypass)) {
		crypto_hash_final(&ctx(tfm)->fallback, out);
		ctx(tfm)->bypass = 0;
		return;
	}

	/* Pass the input buffer to PadLock microcode... */
	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

	ctx(tfm)->used = 0;
}
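/*
 * The fallback is looked up by the algorithm's generic name ("sha1" or
 * "sha256"), so crypto_alloc_hash() picks the best available software
 * implementation.  Masking with CRYPTO_ALG_NEED_FALLBACK excludes
 * implementations that themselves require a fallback, which keeps this
 * driver from selecting itself.
 */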
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_hash *fallback_tfm;

	/* For now we'll allocate one page. This
	 * could eventually be configurable one day. */
	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
	if (!ctx(tfm)->data)
		return -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
					 CRYPTO_ALG_ASYNC |
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		free_page((unsigned long)(ctx(tfm)->data));
		return PTR_ERR(fallback_tfm);
	}

	ctx(tfm)->fallback.tfm = fallback_tfm;
	return 0;
}
static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha1;

	return padlock_cra_init(tfm);
}
static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha256;

	return padlock_cra_init(tfm);
}
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->data) {
		free_page((unsigned long)(ctx(tfm)->data));
		ctx(tfm)->data = NULL;
	}

	crypto_free_hash(ctx(tfm)->fallback.tfm);
	ctx(tfm)->fallback.tfm = NULL;
}
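/*
 * Both algorithms register as synchronous digests that require a
 * fallback.  PADLOCK_CRA_PRIORITY (from padlock.h) is set above the
 * priority of the generic C implementations, so the crypto API prefers
 * these drivers whenever a caller asks for plain "sha1" or "sha256".
 */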
static struct crypto_alg sha1_alg = {
	.cra_name		=	"sha1",
	.cra_driver_name	=	"sha1-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	SHA1_HMAC_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(sha1_alg.cra_list),
	.cra_init		=	padlock_sha1_cra_init,
	.cra_exit		=	padlock_cra_exit,
	.cra_u			=	{
		.digest = {
			.dia_digestsize	=	SHA1_DIGEST_SIZE,
			.dia_init	=	padlock_sha_init,
			.dia_update	=	padlock_sha_update,
			.dia_final	=	padlock_sha_final,
		}
	}
};
static struct crypto_alg sha256_alg = {
	.cra_name		=	"sha256",
	.cra_driver_name	=	"sha256-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	SHA256_HMAC_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(sha256_alg.cra_list),
	.cra_init		=	padlock_sha256_cra_init,
	.cra_exit		=	padlock_cra_exit,
	.cra_u			=	{
		.digest = {
			.dia_digestsize	=	SHA256_DIGEST_SIZE,
			.dia_init	=	padlock_sha_init,
			.dia_update	=	padlock_sha_update,
			.dia_final	=	padlock_sha_final,
		}
	}
};
static void __init padlock_sha_check_fallbacks(void)
{
	if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK))
		printk(KERN_WARNING PFX
		       "Couldn't load fallback module for sha1.\n");

	if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK))
		printk(KERN_WARNING PFX
		       "Couldn't load fallback module for sha256.\n");
}
static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	padlock_sha_check_fallbacks();

	rc = crypto_register_alg(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_alg(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_alg(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}
static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&sha1_alg);
	crypto_unregister_alg(&sha256_alg);
}
module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");