/* GRUB 1.98 -- lib/libgcrypt-grub/cipher/rijndael.c */
/* This file was automatically imported with
   import_gcry.py. Please don't modify it */
/* Rijndael (AES) for GnuPG
 * Copyright (C) 2000, 2001, 2002, 2003, 2007,
 *               2008 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *******************************************************************
 * The code here is based on the optimized implementation taken from
 * http://www.esat.kuleuven.ac.be/~rijmen/rijndael/ on Oct 2, 2000,
 * which carries this notice:
 *------------------------------------------
 * rijndael-alg-fst.c   v2.3   April '2000
 *
 * Optimised ANSI C code
 *
 * authors: v1.0: Antoon Bosselaers
 *          v2.0: Vincent Rijmen
 *          v2.3: Paulo Barreto
 *
 * This code is placed in the public domain.
 *------------------------------------------
 *
 * The SP800-38a document is available at:
 *   http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
 */
#include "types.h"  /* for byte and u32 typedefs */
#include "g10lib.h"
#include "cipher.h"

#define MAXKC      (256/32)
#define MAXROUNDS  14
#define BLOCKSIZE  (128/8)


/* USE_PADLOCK indicates whether to compile the padlock specific
   code.  */
#undef USE_PADLOCK
#ifdef ENABLE_PADLOCK_SUPPORT
# if defined (__i386__) && SIZEOF_UNSIGNED_LONG == 4 && defined (__GNUC__)
#  define USE_PADLOCK
# endif
#endif /*ENABLE_PADLOCK_SUPPORT*/
typedef struct
{
  int ROUNDS;               /* Key-length-dependent number of rounds.  */
  int decryption_prepared;  /* The decryption key schedule is available.  */
#ifdef USE_PADLOCK
  int use_padlock;          /* Padlock shall be used.  */
  /* The key as passed to the padlock engine.  */
  unsigned char padlock_key[16] __attribute__ ((aligned (16)));
#endif
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte keyschedule[MAXROUNDS+1][4][4];
  } u1;
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte keyschedule[MAXROUNDS+1][4][4];
  } u2;
} RIJNDAEL_context;

#define keySched  u1.keyschedule
#define keySched2 u2.keyschedule

/* All the numbers.  */
#include "rijndael-tables.h"
/* Perform the key setup.  */
static gcry_err_code_t
do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
{
  static int initialized = 0;
  static const char *selftest_failed = 0;
  int ROUNDS;
  int i,j, r, t, rconpointer = 0;
  int KC;
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte k[MAXKC][4];
  } k;
#define k k.k
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte tk[MAXKC][4];
  } tk;
#define tk tk.tk

  /* The on-the-fly self tests are only run in non-fips mode.  In fips
     mode explicit self-tests are required.  Actually the on-the-fly
     self-tests are not fully thread-safe and it might happen that a
     failed self-test won't get noticed in another thread.

     FIXME: We might want to have a central registry of succeeded
     self-tests. */
  if (!fips_mode () && !initialized)
    {
      initialized = 1;
      selftest_failed = selftest ();
      if (selftest_failed)
        log_error ("%s\n", selftest_failed );
    }
  if (selftest_failed)
    return GPG_ERR_SELFTEST_FAILED;

  ctx->decryption_prepared = 0;
#ifdef USE_PADLOCK
  ctx->use_padlock = 0;
#endif

  if( keylen == 128/8 )
    {
      ROUNDS = 10;
      KC = 4;
#ifdef USE_PADLOCK
      if ((_gcry_get_hw_features () & HWF_PADLOCK_AES))
        {
          ctx->use_padlock = 1;
          memcpy (ctx->padlock_key, key, keylen);
        }
#endif
    }
  else if ( keylen == 192/8 )
    {
      ROUNDS = 12;
      KC = 6;
    }
  else if ( keylen == 256/8 )
    {
      ROUNDS = 14;
      KC = 8;
    }
  else
    return GPG_ERR_INV_KEYLEN;

  ctx->ROUNDS = ROUNDS;

#ifdef USE_PADLOCK
  if (ctx->use_padlock)
    {
      /* Nothing to do as we support only hardware key generation for
         now.  */
    }
  else
#endif /*USE_PADLOCK*/
    {
#define W (ctx->keySched)
      for (i = 0; i < keylen; i++)
        {
          k[i >> 2][i & 3] = key[i];
        }

      for (j = KC-1; j >= 0; j--)
        {
          *((u32*)tk[j]) = *((u32*)k[j]);
        }
      r = 0;
      t = 0;
      /* Copy values into round key array.  */
      for (j = 0; (j < KC) && (r < ROUNDS + 1); )
        {
          for (; (j < KC) && (t < 4); j++, t++)
            {
              *((u32*)W[r][t]) = *((u32*)tk[j]);
            }
          if (t == 4)
            {
              r++;
              t = 0;
            }
        }

      while (r < ROUNDS + 1)
        {
          /* While not enough round key material calculated calculate
             new values.  */
          tk[0][0] ^= S[tk[KC-1][1]];
          tk[0][1] ^= S[tk[KC-1][2]];
          tk[0][2] ^= S[tk[KC-1][3]];
          tk[0][3] ^= S[tk[KC-1][0]];
          tk[0][0] ^= rcon[rconpointer++];

          if (KC != 8)
            {
              for (j = 1; j < KC; j++)
                {
                  *((u32*)tk[j]) ^= *((u32*)tk[j-1]);
                }
            }
          else
            {
              for (j = 1; j < KC/2; j++)
                {
                  *((u32*)tk[j]) ^= *((u32*)tk[j-1]);
                }
              tk[KC/2][0] ^= S[tk[KC/2 - 1][0]];
              tk[KC/2][1] ^= S[tk[KC/2 - 1][1]];
              tk[KC/2][2] ^= S[tk[KC/2 - 1][2]];
              tk[KC/2][3] ^= S[tk[KC/2 - 1][3]];
              for (j = KC/2 + 1; j < KC; j++)
                {
                  *((u32*)tk[j]) ^= *((u32*)tk[j-1]);
                }
            }

          /* Copy values into round key array.  */
          for (j = 0; (j < KC) && (r < ROUNDS + 1); )
            {
              for (; (j < KC) && (t < 4); j++, t++)
                {
                  *((u32*)W[r][t]) = *((u32*)tk[j]);
                }
              if (t == 4)
                {
                  r++;
                  t = 0;
                }
            }
        }
#undef W
    }

  return 0;
#undef tk
#undef k
}
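
/* Editor's note, not part of the imported source: per FIPS-197 the
   expanded key holds 4*(ROUNDS+1) 32-bit words, i.e. ROUNDS+1 round
   keys of 16 bytes each -- 44 words for AES-128 (ROUNDS=10), 52 for
   AES-192 (ROUNDS=12) and 60 for AES-256 (ROUNDS=14).  That is why W
   above is indexed as W[r][t] with r <= ROUNDS and t < 4.  */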
static gcry_err_code_t
rijndael_setkey (void *context, const byte *key, const unsigned keylen)
{
  RIJNDAEL_context *ctx = context;

  int rc = do_setkey (ctx, key, keylen);
  _gcry_burn_stack ( 100 + 16*sizeof(int));
  return rc;
}
/* Make a decryption key from an encryption key. */
static void
prepare_decryption( RIJNDAEL_context *ctx )
{
  int r;
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte *w;
  } w;
#define w w.w

  for (r=0; r < MAXROUNDS+1; r++ )
    {
      *((u32*)ctx->keySched2[r][0]) = *((u32*)ctx->keySched[r][0]);
      *((u32*)ctx->keySched2[r][1]) = *((u32*)ctx->keySched[r][1]);
      *((u32*)ctx->keySched2[r][2]) = *((u32*)ctx->keySched[r][2]);
      *((u32*)ctx->keySched2[r][3]) = *((u32*)ctx->keySched[r][3]);
    }
#define W (ctx->keySched2)
  for (r = 1; r < ctx->ROUNDS; r++)
    {
      w = W[r][0];
      *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
        ^ *((u32*)U3[w[2]]) ^ *((u32*)U4[w[3]]);

      w = W[r][1];
      *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
        ^ *((u32*)U3[w[2]]) ^ *((u32*)U4[w[3]]);

      w = W[r][2];
      *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
        ^ *((u32*)U3[w[2]]) ^ *((u32*)U4[w[3]]);

      w = W[r][3];
      *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
        ^ *((u32*)U3[w[2]]) ^ *((u32*)U4[w[3]]);
    }
#undef W
#undef w
}
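
/* Editor's note, not part of the imported source: the loop above
   implements the "equivalent inverse cipher" of FIPS-197.  The
   decryption schedule starts as a copy of the encryption round keys;
   applying InvMixColumns (via the U1..U4 lookup tables from
   rijndael-tables.h) to every round key except the first and the last
   lets do_decrypt_aligned below reuse the same table-lookup round
   structure as encryption.  */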
/* Encrypt one block.  A and B need to be aligned on a 4 byte
   boundary.  A and B may be the same. */
static void
do_encrypt_aligned (const RIJNDAEL_context *ctx,
                    unsigned char *b, const unsigned char *a)
{
#define rk (ctx->keySched)
  int ROUNDS = ctx->ROUNDS;
  int r;
  union
  {
    u32  tempu32[4];  /* Force correct alignment. */
    byte temp[4][4];
  } u;

  *((u32*)u.temp[0]) = *((u32*)(a   )) ^ *((u32*)rk[0][0]);
  *((u32*)u.temp[1]) = *((u32*)(a+ 4)) ^ *((u32*)rk[0][1]);
  *((u32*)u.temp[2]) = *((u32*)(a+ 8)) ^ *((u32*)rk[0][2]);
  *((u32*)u.temp[3]) = *((u32*)(a+12)) ^ *((u32*)rk[0][3]);
  *((u32*)(b    )) = (*((u32*)T1[u.temp[0][0]])
                      ^ *((u32*)T2[u.temp[1][1]])
                      ^ *((u32*)T3[u.temp[2][2]])
                      ^ *((u32*)T4[u.temp[3][3]]));
  *((u32*)(b + 4)) = (*((u32*)T1[u.temp[1][0]])
                      ^ *((u32*)T2[u.temp[2][1]])
                      ^ *((u32*)T3[u.temp[3][2]])
                      ^ *((u32*)T4[u.temp[0][3]]));
  *((u32*)(b + 8)) = (*((u32*)T1[u.temp[2][0]])
                      ^ *((u32*)T2[u.temp[3][1]])
                      ^ *((u32*)T3[u.temp[0][2]])
                      ^ *((u32*)T4[u.temp[1][3]]));
  *((u32*)(b +12)) = (*((u32*)T1[u.temp[3][0]])
                      ^ *((u32*)T2[u.temp[0][1]])
                      ^ *((u32*)T3[u.temp[1][2]])
                      ^ *((u32*)T4[u.temp[2][3]]));

  for (r = 1; r < ROUNDS-1; r++)
    {
      *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[r][0]);
      *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[r][1]);
      *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[r][2]);
      *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[r][3]);

      *((u32*)(b    )) = (*((u32*)T1[u.temp[0][0]])
                          ^ *((u32*)T2[u.temp[1][1]])
                          ^ *((u32*)T3[u.temp[2][2]])
                          ^ *((u32*)T4[u.temp[3][3]]));
      *((u32*)(b + 4)) = (*((u32*)T1[u.temp[1][0]])
                          ^ *((u32*)T2[u.temp[2][1]])
                          ^ *((u32*)T3[u.temp[3][2]])
                          ^ *((u32*)T4[u.temp[0][3]]));
      *((u32*)(b + 8)) = (*((u32*)T1[u.temp[2][0]])
                          ^ *((u32*)T2[u.temp[3][1]])
                          ^ *((u32*)T3[u.temp[0][2]])
                          ^ *((u32*)T4[u.temp[1][3]]));
      *((u32*)(b +12)) = (*((u32*)T1[u.temp[3][0]])
                          ^ *((u32*)T2[u.temp[0][1]])
                          ^ *((u32*)T3[u.temp[1][2]])
                          ^ *((u32*)T4[u.temp[2][3]]));
    }

  /* Last round is special. */
  *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[ROUNDS-1][0]);
  *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[ROUNDS-1][1]);
  *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[ROUNDS-1][2]);
  *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[ROUNDS-1][3]);
  b[ 0] = T1[u.temp[0][0]][1];
  b[ 1] = T1[u.temp[1][1]][1];
  b[ 2] = T1[u.temp[2][2]][1];
  b[ 3] = T1[u.temp[3][3]][1];
  b[ 4] = T1[u.temp[1][0]][1];
  b[ 5] = T1[u.temp[2][1]][1];
  b[ 6] = T1[u.temp[3][2]][1];
  b[ 7] = T1[u.temp[0][3]][1];
  b[ 8] = T1[u.temp[2][0]][1];
  b[ 9] = T1[u.temp[3][1]][1];
  b[10] = T1[u.temp[0][2]][1];
  b[11] = T1[u.temp[1][3]][1];
  b[12] = T1[u.temp[3][0]][1];
  b[13] = T1[u.temp[0][1]][1];
  b[14] = T1[u.temp[1][2]][1];
  b[15] = T1[u.temp[2][3]][1];
  *((u32*)(b   )) ^= *((u32*)rk[ROUNDS][0]);
  *((u32*)(b+ 4)) ^= *((u32*)rk[ROUNDS][1]);
  *((u32*)(b+ 8)) ^= *((u32*)rk[ROUNDS][2]);
  *((u32*)(b+12)) ^= *((u32*)rk[ROUNDS][3]);
#undef rk
}
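
/* Editor's note, not part of the imported source: T1..T4 are the
   classic 32-bit "T tables"; each entry merges the SubBytes,
   ShiftRows and MixColumns steps for one state byte, so a full round
   is 16 table lookups and 16 XORs.  The last round skips MixColumns,
   which is why it instead picks single S-box bytes out of T1
   (T1[x][1] holds S[x]) before XORing the final round key.  */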
static void
do_encrypt (const RIJNDAEL_context *ctx,
            unsigned char *bx, const unsigned char *ax)
{
  /* BX and AX are not necessarily correctly aligned.  Thus we need to
     copy them here. */
  union
  {
    u32 dummy[4];
    byte a[16];
  } a;
  union
  {
    u32 dummy[4];
    byte b[16];
  } b;

  memcpy (a.a, ax, 16);
  do_encrypt_aligned (ctx, b.b, a.a);
  memcpy (bx, b.b, 16);
}
/* Encrypt or decrypt one block using the padlock engine.  A and B may
   be the same. */
#ifdef USE_PADLOCK
static void
do_padlock (const RIJNDAEL_context *ctx, int decrypt_flag,
            unsigned char *bx, const unsigned char *ax)
{
  /* BX and AX are not necessarily correctly aligned.  Thus we need to
     copy them here. */
  unsigned char a[16] __attribute__ ((aligned (16)));
  unsigned char b[16] __attribute__ ((aligned (16)));
  unsigned int cword[4] __attribute__ ((aligned (16)));

  /* The control word fields are:

      127:12   11:10 9     8     7     6     5     4     3:0
      RESERVED KSIZE CRYPT INTER KEYGN CIPHR ALIGN DGEST ROUND  */
  cword[0] = (ctx->ROUNDS & 15);  /* (The mask is just a safeguard.)  */
  cword[1] = 0;
  cword[2] = 0;
  cword[3] = 0;
  if (decrypt_flag)
    cword[0] |= 0x00000200;
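
  /* Editor's note, not part of the imported source: as a worked
     example of the field layout above, an AES-128 decryption sets
     ROUND = 10 and the CRYPT (decrypt) bit 9, giving
     cword[0] = 0x0a | 0x200 = 0x20a.  */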
  memcpy (a, ax, 16);

  asm volatile
    ("pushfl\n\t"          /* Force key reload.  */
     "popfl\n\t"
     "xchg %3, %%ebx\n\t"  /* Load key.  */
     "movl $1, %%ecx\n\t"  /* Init counter for just one block.  */
     ".byte 0xf3, 0x0f, 0xa7, 0xc8\n\t" /* REP XCRYPT ECB. */
     "xchg %3, %%ebx\n"    /* Restore GOT register.  */
     : /* No output */
     : "S" (a), "D" (b), "d" (cword), "r" (ctx->padlock_key)
     : "%ecx", "cc", "memory"
     );

  memcpy (bx, b, 16);
}
#endif /*USE_PADLOCK*/
static void
rijndael_encrypt (void *context, byte *b, const byte *a)
{
  RIJNDAEL_context *ctx = context;

#ifdef USE_PADLOCK
  if (ctx->use_padlock)
    {
      do_padlock (ctx, 0, b, a);
      _gcry_burn_stack (48 + 15 /* possible padding for alignment */);
    }
  else
#endif /*USE_PADLOCK*/
    {
      do_encrypt (ctx, b, a);
      _gcry_burn_stack (48 + 2*sizeof(int));
    }
}
/* Bulk encryption of complete blocks in CFB mode.  Caller needs to
   make sure that IV is aligned on an unsigned long boundary.  This
   function is only intended for the bulk encryption feature of
   cipher.c. */


/* Bulk encryption of complete blocks in CBC mode.  Caller needs to
   make sure that IV is aligned on an unsigned long boundary.  This
   function is only intended for the bulk encryption feature of
   cipher.c. */
/* Decrypt one block.  A and B need to be aligned on a 4 byte boundary
   and the decryption must have been prepared.  A and B may be the
   same. */
static void
do_decrypt_aligned (RIJNDAEL_context *ctx,
                    unsigned char *b, const unsigned char *a)
{
#define rk (ctx->keySched2)
  int ROUNDS = ctx->ROUNDS;
  int r;
  union
  {
    u32  tempu32[4];  /* Force correct alignment. */
    byte temp[4][4];
  } u;

  *((u32*)u.temp[0]) = *((u32*)(a   )) ^ *((u32*)rk[ROUNDS][0]);
  *((u32*)u.temp[1]) = *((u32*)(a+ 4)) ^ *((u32*)rk[ROUNDS][1]);
  *((u32*)u.temp[2]) = *((u32*)(a+ 8)) ^ *((u32*)rk[ROUNDS][2]);
  *((u32*)u.temp[3]) = *((u32*)(a+12)) ^ *((u32*)rk[ROUNDS][3]);

  *((u32*)(b   )) = (*((u32*)T5[u.temp[0][0]])
                     ^ *((u32*)T6[u.temp[3][1]])
                     ^ *((u32*)T7[u.temp[2][2]])
                     ^ *((u32*)T8[u.temp[1][3]]));
  *((u32*)(b+ 4)) = (*((u32*)T5[u.temp[1][0]])
                     ^ *((u32*)T6[u.temp[0][1]])
                     ^ *((u32*)T7[u.temp[3][2]])
                     ^ *((u32*)T8[u.temp[2][3]]));
  *((u32*)(b+ 8)) = (*((u32*)T5[u.temp[2][0]])
                     ^ *((u32*)T6[u.temp[1][1]])
                     ^ *((u32*)T7[u.temp[0][2]])
                     ^ *((u32*)T8[u.temp[3][3]]));
  *((u32*)(b+12)) = (*((u32*)T5[u.temp[3][0]])
                     ^ *((u32*)T6[u.temp[2][1]])
                     ^ *((u32*)T7[u.temp[1][2]])
                     ^ *((u32*)T8[u.temp[0][3]]));

  for (r = ROUNDS-1; r > 1; r--)
    {
      *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[r][0]);
      *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[r][1]);
      *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[r][2]);
      *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[r][3]);
      *((u32*)(b   )) = (*((u32*)T5[u.temp[0][0]])
                         ^ *((u32*)T6[u.temp[3][1]])
                         ^ *((u32*)T7[u.temp[2][2]])
                         ^ *((u32*)T8[u.temp[1][3]]));
      *((u32*)(b+ 4)) = (*((u32*)T5[u.temp[1][0]])
                         ^ *((u32*)T6[u.temp[0][1]])
                         ^ *((u32*)T7[u.temp[3][2]])
                         ^ *((u32*)T8[u.temp[2][3]]));
      *((u32*)(b+ 8)) = (*((u32*)T5[u.temp[2][0]])
                         ^ *((u32*)T6[u.temp[1][1]])
                         ^ *((u32*)T7[u.temp[0][2]])
                         ^ *((u32*)T8[u.temp[3][3]]));
      *((u32*)(b+12)) = (*((u32*)T5[u.temp[3][0]])
                         ^ *((u32*)T6[u.temp[2][1]])
                         ^ *((u32*)T7[u.temp[1][2]])
                         ^ *((u32*)T8[u.temp[0][3]]));
    }

  /* Last round is special. */
  *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[1][0]);
  *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[1][1]);
  *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[1][2]);
  *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[1][3]);
  b[ 0] = S5[u.temp[0][0]];
  b[ 1] = S5[u.temp[3][1]];
  b[ 2] = S5[u.temp[2][2]];
  b[ 3] = S5[u.temp[1][3]];
  b[ 4] = S5[u.temp[1][0]];
  b[ 5] = S5[u.temp[0][1]];
  b[ 6] = S5[u.temp[3][2]];
  b[ 7] = S5[u.temp[2][3]];
  b[ 8] = S5[u.temp[2][0]];
  b[ 9] = S5[u.temp[1][1]];
  b[10] = S5[u.temp[0][2]];
  b[11] = S5[u.temp[3][3]];
  b[12] = S5[u.temp[3][0]];
  b[13] = S5[u.temp[2][1]];
  b[14] = S5[u.temp[1][2]];
  b[15] = S5[u.temp[0][3]];
  *((u32*)(b   )) ^= *((u32*)rk[0][0]);
  *((u32*)(b+ 4)) ^= *((u32*)rk[0][1]);
  *((u32*)(b+ 8)) ^= *((u32*)rk[0][2]);
  *((u32*)(b+12)) ^= *((u32*)rk[0][3]);
#undef rk
}
/* Decrypt one block.  AX and BX may be the same. */
static void
do_decrypt (RIJNDAEL_context *ctx, byte *bx, const byte *ax)
{
  /* BX and AX are not necessarily correctly aligned.  Thus we need to
     copy them here. */
  union
  {
    u32 dummy[4];
    byte a[16];
  } a;
  union
  {
    u32 dummy[4];
    byte b[16];
  } b;

  if ( !ctx->decryption_prepared )
    {
      prepare_decryption ( ctx );
      _gcry_burn_stack (64);
      ctx->decryption_prepared = 1;
    }

  memcpy (a.a, ax, 16);
  do_decrypt_aligned (ctx, b.b, a.a);
  memcpy (bx, b.b, 16);
}
static void
rijndael_decrypt (void *context, byte *b, const byte *a)
{
  RIJNDAEL_context *ctx = context;

#ifdef USE_PADLOCK
  if (ctx->use_padlock)
    {
      do_padlock (ctx, 1, b, a);
      _gcry_burn_stack (48 + 2*sizeof(int) /* FIXME */);
    }
  else
#endif /*USE_PADLOCK*/
    {
      do_decrypt (ctx, b, a);
      _gcry_burn_stack (48+2*sizeof(int));
    }
}
/* Bulk decryption of complete blocks in CFB mode.  Caller needs to
   make sure that IV is aligned on an unsigned long boundary.  This
   function is only intended for the bulk encryption feature of
   cipher.c. */


/* Bulk decryption of complete blocks in CBC mode.  Caller needs to
   make sure that IV is aligned on an unsigned long boundary.  This
   function is only intended for the bulk encryption feature of
   cipher.c. */
/* Run the self-tests for AES 128.  Returns NULL on success. */

/* Run the self-tests for AES 192.  Returns NULL on success. */

/* Run the self-tests for AES 256.  Returns NULL on success. */

/* Run all the self-tests and return NULL on success.  This function
   is used for the on-the-fly self-tests. */

/* SP800-38a.pdf for AES-128.  */

/* Complete selftest for AES-128 with all modes and driver code.  */

/* Complete selftest for AES-192.  */

/* Complete selftest for AES-256.  */

/* Run a full self-test for ALGO and return 0 on success.  */
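
/* Editor's note: the self-test bodies were stripped by import_gcry.py,
   leaving only the comments above.  Purely as an illustration of what
   such a known-answer test looks like, here is a minimal sketch, kept
   under "#if 0".  The helper name is hypothetical; the expected values
   are the AES-128 vector from FIPS-197, Appendix C.1.  */
#if 0
static const char *
selftest_basic_128_sketch (void)
{
  static const byte key[16] =
    {
      0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
    };
  static const byte plaintext[16] =
    {
      0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
      0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
    };
  static const byte ciphertext[16] =
    {
      0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
      0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a
    };
  RIJNDAEL_context ctx;
  byte scratch[16];

  /* Expand the key, encrypt one block and compare against the
     published ciphertext, then decrypt in place and compare against
     the original plaintext.  */
  rijndael_setkey (&ctx, key, sizeof (key));
  rijndael_encrypt (&ctx, scratch, plaintext);
  if (memcmp (scratch, ciphertext, sizeof (ciphertext)))
    return "AES-128 test encryption failed.";
  rijndael_decrypt (&ctx, scratch, scratch);
  if (memcmp (scratch, plaintext, sizeof (plaintext)))
    return "AES-128 test decryption failed.";
  return NULL;
}
#endif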
static const char *rijndael_names[] =
  {
    "RIJNDAEL",
    "AES128",
    "AES-128",
    NULL
  };

static gcry_cipher_oid_spec_t rijndael_oids[] =
  {
    { "2.16.840.1.101.3.4.1.1", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.2", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.3", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.4", GCRY_CIPHER_MODE_CFB },
    { NULL }
  };

gcry_cipher_spec_t _gcry_cipher_spec_aes =
  {
    "AES", rijndael_names, rijndael_oids, 16, 128, sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt
  };

static const char *rijndael192_names[] =
  {
    "RIJNDAEL192",
    "AES-192",
    NULL
  };

static gcry_cipher_oid_spec_t rijndael192_oids[] =
  {
    { "2.16.840.1.101.3.4.1.21", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.22", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.23", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.24", GCRY_CIPHER_MODE_CFB },
    { NULL }
  };

gcry_cipher_spec_t _gcry_cipher_spec_aes192 =
  {
    "AES192", rijndael192_names, rijndael192_oids, 16, 192,
    sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt
  };

static const char *rijndael256_names[] =
  {
    "RIJNDAEL256",
    "AES-256",
    NULL
  };

static gcry_cipher_oid_spec_t rijndael256_oids[] =
  {
    { "2.16.840.1.101.3.4.1.41", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.42", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.43", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.44", GCRY_CIPHER_MODE_CFB },
    { NULL }
  };

gcry_cipher_spec_t _gcry_cipher_spec_aes256 =
  {
    "AES256", rijndael256_names, rijndael256_oids, 16, 256,
    sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt
  };
GRUB_MOD_INIT(gcry_rijndael)
{
  grub_cipher_register (&_gcry_cipher_spec_aes);
  grub_cipher_register (&_gcry_cipher_spec_aes192);
  grub_cipher_register (&_gcry_cipher_spec_aes256);
}

GRUB_MOD_FINI(gcry_rijndael)
{
  grub_cipher_unregister (&_gcry_cipher_spec_aes);
  grub_cipher_unregister (&_gcry_cipher_spec_aes192);
  grub_cipher_unregister (&_gcry_cipher_spec_aes256);
}
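
/* Editor's note: a minimal sketch of how other GRUB code consumes the
   specs registered above, assuming the helpers declared in GRUB's
   <grub/crypto.h> (grub_crypto_lookup_cipher_by_name,
   grub_crypto_cipher_open, grub_crypto_cipher_set_key,
   grub_crypto_ecb_encrypt, grub_crypto_cipher_close).  The function
   name and buffer contents are placeholders, kept under "#if 0".  */
#if 0
#include <grub/crypto.h>

static grub_err_t
example_use_aes (void)
{
  const gcry_cipher_spec_t *spec;
  grub_crypto_cipher_handle_t hnd;
  grub_uint8_t key[16] = { 0 };   /* Placeholder key.  */
  grub_uint8_t buf[16] = { 0 };   /* One 16-byte AES block.  */

  /* Look up the cipher by one of the names in rijndael_names.  */
  spec = grub_crypto_lookup_cipher_by_name ("AES-128");
  if (!spec)
    return grub_error (GRUB_ERR_FILE_NOT_FOUND, "AES not registered");

  hnd = grub_crypto_cipher_open (spec);
  if (!hnd)
    return grub_errno;

  /* A nonzero return is a gcry_err_code_t failure.  */
  if (grub_crypto_cipher_set_key (hnd, key, sizeof (key)))
    {
      grub_crypto_cipher_close (hnd);
      return grub_error (GRUB_ERR_BAD_ARGUMENT, "AES setkey failed");
    }

  grub_crypto_ecb_encrypt (hnd, buf, buf, sizeof (buf));
  grub_crypto_cipher_close (hnd);
  return GRUB_ERR_NONE;
}
#endif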