serpent-decrypt.c - Serpent block cipher decryption (from the nettle library, as bundled in tomato.git).
[tomato.git] / release / src / router / nettle / serpent-decrypt.c
bloba7ae661cb3096a203d92a9c045de5a2502a5a9da
1 /* serpent-decrypt.c
3 * The serpent block cipher.
5 * For more details on this algorithm, see the Serpent website at
6 * http://www.cl.cam.ac.uk/~rja14/serpent.html
7 */
/* nettle, low-level cryptographics library
 *
 * Copyright (C) 2011  Niels Möller
 * Copyright (C) 2010, 2011  Simon Josefsson
 * Copyright (C) 2003, 2004, 2005  Free Software Foundation, Inc.
 *
 * The nettle library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * The nettle library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with the nettle library; see the file COPYING.LIB.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02111-1301, USA.
 */
31 /* This file is derived from cipher/serpent.c in Libgcrypt v1.4.6.
32 The adaption to Nettle was made by Simon Josefsson on 2010-12-07
33 with final touches on 2011-05-30. Changes include replacing
34 libgcrypt with nettle in the license template, renaming
35 serpent_context to serpent_ctx, renaming u32 to uint32_t, removing
36 libgcrypt stubs and selftests, modifying entry function prototypes,
37 using FOR_BLOCKS to iterate through data in encrypt/decrypt, using
38 LE_READ_UINT32 and LE_WRITE_UINT32 to access data in
39 encrypt/decrypt, and running indent on the code. */
41 #if HAVE_CONFIG_H
42 #include "config.h"
43 #endif
45 #include <assert.h>
46 #include <limits.h>
48 #include "serpent.h"
50 #include "macros.h"
51 #include "serpent-internal.h"
53 /* These are the S-Boxes of Serpent. They are copied from Serpents
54 reference implementation (the optimized one, contained in
55 `floppy2') and are therefore:
57 Copyright (C) 1998 Ross Anderson, Eli Biham, Lars Knudsen.
59 To quote the Serpent homepage
60 (http://www.cl.cam.ac.uk/~rja14/serpent.html):
62 "Serpent is now completely in the public domain, and we impose no
63 restrictions on its use. This was announced on the 21st August at
64 the First AES Candidate Conference. The optimised implementations
65 in the submission package are now under the GNU PUBLIC LICENSE
66 (GPL), although some comments in the code still say otherwise. You
67 are welcome to use Serpent for any application." */
/* S0 inverse:  13  3 11  0 10  6  5 12  1 14  4  7 15  9  8  2 */
/* Original single-assignment form:
   t01 = x2  ^ x3;
   t02 = x0  | x1;
   t03 = x1  | x2;
   t04 = x2  & t01;
   t05 = t02 ^ t01;
   t06 = x0  | t04;
   y2  =     ~ t05;
   t08 = x1  ^ x3;
   t09 = t03 & t08;
   t10 = x3  | y2;
   y1  = t09 ^ t06;
   t12 = x0  | t05;
   t13 = y1  ^ t12;
   t14 = t03 ^ t10;
   t15 = x0  ^ x2;
   y3  = t14 ^ t13;
   t17 = t05 & t13;
   t18 = t14 | t17;
   y0  = t15 ^ t18;
*/
/* Bit-sliced inverse S-box: bit i of (x3,x2,x1,x0) forms one 4-bit input,
   so 32 S-box lookups happen in parallel.  The x arguments are clobbered;
   results appear in y0..y3.  Statement order is significant. */
#define SBOX0_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    y0  = x0 ^ x2; \
    y2  = x0 | x1; \
    y1  = x2 ^ x3; \
    y2 ^= y1; \
    y1 &= x2; \
    x2 |= x1; \
    x1 ^= x3; \
    y1 |= x0; \
    x1 &= x2; \
    y1 ^= x1; \
    x0 |= y2; \
    x0 ^= y1; \
    x1  = y2 & x0; \
    y2  = ~ y2; \
    x3 |= y2; \
    x3 ^= x2; \
    y3  = x3 ^ x0; \
    x1 |= x3; \
    y0 ^= x1; \
  } while (0)
/* S1 inverse:   5  8  2 14 15  6 12  3 11  4  7  9  1 13 10  0 */
/* Original single-assignment form:
   t01 = x0  ^ x1;
   t02 = x1  | x3;
   t03 = x0  & x2;
   t04 = x2  ^ t02;
   t05 = x0  | t04;
   t06 = t01 & t05;
   t07 = x3  | t03;
   t08 = x1  ^ t06;
   t09 = t07 ^ t06;
   t10 = t04 | t03;
   t11 = x3  & t08;
   y2  =     ~ t09;
   y1  = t10 ^ t11;
   t14 = x0  | y2;
   t15 = t06 ^ y1;
   y3  = t01 ^ t04;
   t17 = x2  ^ t15;
   y0  = t14 ^ t17;
*/
/* Bit-sliced inverse S-box; clobbers x arguments, outputs in y0..y3. */
#define SBOX1_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    y1  = x1 | x3; \
    y1 ^= x2; \
    y3  = x0 ^ x1; \
    y0  = x0 | y1; \
    y0 &= y3; \
    x1 ^= y0; \
    y3 ^= y1; \
    x1 &= x3; \
    y2  = x0 & x2; \
    y1 |= y2; \
    y2 |= x3; \
    y2 ^= y0; \
    y2  = ~ y2; \
    y1 ^= x1; \
    y0 ^= y1; \
    y0 ^= x2; \
    x0 |= y2; \
    y0 ^= x0; \
  } while (0)
/* S2 inverse:  12  9 15  4 11 14  1  2  0  3  6 13  5  8 10  7 */
/* Original single-assignment form:
   t01 = x0  ^ x3;
   t02 = x2  ^ x3;
   t03 = x0  & x2;
   t04 = x1  | t02;
   y0  = t01 ^ t04;
   t06 = x0  | x2;
   t07 = x3  | y0;
   t08 =     ~ x3;
   t09 = x1  & t06;
   t10 = t08 | t03;
   t11 = x1  & t07;
   t12 = t06 & t02;
   y3  = t09 ^ t10;
   y1  = t12 ^ t11;
   t15 = x2  & y3;
   t16 = y0  ^ y1;
   t17 = t10 ^ t15;
   y2  = t16 ^ t17;
*/
/* Bit-sliced inverse S-box; clobbers x arguments, outputs in y0..y3. */
#define SBOX2_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    y0  = x0 ^ x3; \
    y2  = x2 ^ x3; \
    y1  = x1 | y2; \
    y0 ^= y1; \
    y1  = x3 | y0; \
    y1 &= x1; \
    x3  = ~ x3; \
    y3  = x0 | x2; \
    y2 &= y3; \
    y1 ^= y2; \
    y3 &= x1; \
    x0 &= x2; \
    x0 |= x3; \
    y3 ^= x0; \
    x2 &= y3; \
    x2 ^= x0; \
    y2  = y0 ^ y1; \
    y2 ^= x2; \
  } while (0)
/* S3 inverse:   0  9 10  7 11 14  6 13  3  5 12  2  4  8 15  1 */
/* Original single-assignment form:
   t01 = x2  | x3;
   t02 = x0  | x3;
   t03 = x2  ^ t02;
   t04 = x1  ^ t02;
   t05 = x0  ^ x3;
   t06 = t04 & t03;
   t07 = x1  & t01;
   y2  = t05 ^ t06;
   t09 = x0  ^ t03;
   y0  = t07 ^ t03;
   t11 = y0  | t05;
   t12 = t09 & t11;
   t13 = x0  & y2;
   t14 = t01 ^ t05;
   y1  = x1  ^ t12;
   t16 = x1  | t13;
   y3  = t14 ^ t16;
*/
/* Bit-sliced inverse S-box; clobbers x arguments, outputs in y0..y3. */
#define SBOX3_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    y3  = x2 | x3; \
    y0  = x1 & y3; \
    y2  = x0 | x3; \
    y1  = x2 ^ y2; \
    y0 ^= y1; \
    x3 ^= x0; \
    y3 ^= x3; \
    y2 ^= x1; \
    y2 &= y1; \
    y2 ^= x3; \
    y1 ^= x0; \
    x3 |= y0; \
    y1 &= x3; \
    y1 ^= x1; \
    x0 &= y2; \
    x0 |= x1; \
    y3 ^= x0; \
  } while (0)
/* S4 inverse:   5  0  8  3 10  9  7 14  2 12 11  6  4 15 13  1 */
/* Original single-assignment form:
   t01 = x1  | x3;
   t02 = x2  | x3;
   t03 = x0  & t01;
   t04 = x1  ^ t02;
   t05 = x2  ^ x3;
   t06 =     ~ t03;
   t07 = x0  & t04;
   y1  = t05 ^ t07;
   t09 = y1  | t06;
   t10 = x0  ^ t07;
   t11 = t01 ^ t09;
   t12 = x3  ^ t04;
   t13 = x2  | t10;
   y3  = t03 ^ t12;
   t15 = x0  ^ t04;
   y2  = t11 ^ t13;
   y0  = t15 ^ t09;
*/
/* Bit-sliced inverse S-box; clobbers x arguments, outputs in y0..y3. */
#define SBOX4_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    y1  = x2 ^ x3; \
    y2  = x2 | x3; \
    y2 ^= x1; \
    x1 |= x3; \
    y0  = x0 ^ y2; \
    x3 ^= y2; \
    y2 &= x0; \
    y1 ^= y2; \
    y2 ^= x0; \
    y2 |= x2; \
    x0 &= x1; \
    y3  = x0 ^ x3; \
    x0  = ~ x0; \
    x0 |= y1; \
    y0 ^= x0; \
    x0 ^= x1; \
    y2 ^= x0; \
  } while (0)
/* S5 inverse:   8 15  2  9  4  1 13 14 11  6  5  3  7 12 10  0 */
/* Original single-assignment form:
   t01 = x0  & x3;
   t02 = x2  ^ t01;
   t03 = x0  ^ x3;
   t04 = x1  & t02;
   t05 = x0  & x2;
   y0  = t03 ^ t04;
   t07 = x0  & y0;
   t08 = t01 ^ y0;
   t09 = x1  | t05;
   t10 =     ~ x1;
   y1  = t08 ^ t09;
   t12 = t10 | t07;
   t13 = y0  | y1;
   y3  = t02 ^ t12;
   t15 = t02 ^ t13;
   t16 = x1  ^ x3;
   y2  = t16 ^ t15;
*/
/* Bit-sliced inverse S-box; clobbers x arguments, outputs in y0..y3. */
#define SBOX5_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    y1  = x0 & x3; \
    y3  = x2 ^ y1; \
    y0  = x1 & y3; \
    y2  = x0 ^ x3; \
    x3 ^= x1; \
    y0 ^= y2; \
    x2 &= x0; \
    x0 &= y0; \
    x2 |= x1; \
    y1 ^= y0; \
    y1 ^= x2; \
    y2  = y0 | y1; \
    y2 ^= y3; \
    y2 ^= x3; \
    x1  = ~ x1; \
    x1 |= x0; \
    y3 ^= x1; \
  } while (0)
/* S6 inverse:  15 10  1 13  5  3  6  0  4  9 14  7  2 12  8 11 */
/* Original single-assignment form:
   t01 = x0  ^ x2;
   t02 =     ~ x2;
   t03 = x1  & t01;
   t04 = x1  | t02;
   t05 = x3  | t03;
   t06 = x1  ^ x3;
   t07 = x0  & t04;
   t08 = x0  | t02;
   t09 = t07 ^ t05;
   y1  = t06 ^ t08;
   y0  =     ~ t09;
   t12 = x1  & y0;
   t13 = t01 & t05;
   t14 = t01 ^ t12;
   t15 = t07 ^ t13;
   t16 = x3  | t02;
   t17 = x0  ^ y1;
   y3  = t17 ^ t15;
   y2  = t16 ^ t14;
*/
/* Bit-sliced inverse S-box; clobbers x arguments, outputs in y0..y3. */
#define SBOX6_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    y2  = x0 ^ x2; \
    x2  = ~ x2; \
    y0  = x1 ^ x3; \
    y1  = x0 | x2; \
    y1 ^= y0; \
    y3  = x1 & y2; \
    y3 |= x3; \
    x3 |= x2; \
    x2 |= x1; \
    x2 &= x0; \
    y0  = x2 ^ y3; \
    y0  = ~ y0; \
    y3 &= y2; \
    y3 ^= x2; \
    x0 ^= y1; \
    y3 ^= x0; \
    x1 &= y0; \
    y2 ^= x1; \
    y2 ^= x3; \
  } while (0)
/* S7 inverse:   3  0  6 13  9 14 15  8  5 12 11  7 10  1  4  2 */
/* Original single-assignment form:
   t01 = x0  & x1;
   t02 = x0  | x1;
   t03 = x2  | t01;
   t04 = x3  & t02;
   y3  = t03 ^ t04;
   t06 = x1  ^ t04;
   t07 = x3  ^ y3;
   t08 =     ~ t07;
   t09 = t06 | t08;
   t10 = x1  ^ x3;
   t11 = x0  | x3;
   y1  = x0  ^ t09;
   t13 = x2  ^ t06;
   t14 = x2  & t11;
   t15 = x3  | y1;
   t16 = t01 | t10;
   y0  = t13 ^ t15;
   y2  = t14 ^ t16;
*/
/* Bit-sliced inverse S-box; clobbers x arguments, outputs in y0..y3. */
#define SBOX7_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    y3  = x0 & x1; \
    y2  = x1 ^ x3; \
    y2 |= y3; \
    y1  = x0 | x3; \
    y1 &= x2; \
    y2 ^= y1; \
    y3 |= x2; \
    y0  = x0 | x1; \
    y0 &= x3; \
    y3 ^= y0; \
    y0 ^= x1; \
    y1  = x3 ^ y3; \
    y1  = ~ y1; \
    y1 |= y0; \
    y0 ^= x2; \
    y1 ^= x0; \
    x3 |= y1; \
    y0 ^= x3; \
  } while (0)
/* In-place inverse linear transformation.  Undoes the forward Serpent
   linear transformation: the steps run in reverse order, each left
   rotation here is 32 minus the forward rotation amount, and the xor
   steps cancel the corresponding forward xors.  Statement order is
   significant since the macro updates its arguments in place. */
#define LINEAR_TRANSFORMATION_INVERSE(x0,x1,x2,x3) \
  do { \
    x2 = ROTL32 (10, x2); \
    x0 = ROTL32 (27, x0); \
    x2 = x2 ^ x3 ^ (x1 << 7); \
    x0 = x0 ^ x1 ^ x3; \
    x3 = ROTL32 (25, x3); \
    x1 = ROTL32 (31, x1); \
    x3 = x3 ^ x2 ^ (x0 << 3); \
    x1 = x1 ^ x0 ^ x2; \
    x2 = ROTL32 (29, x2); \
    x0 = ROTL32 (19, x0); \
  } while (0)
/* One inverse Serpent round: inverse linear transformation, then the
   inverse S-box selected by `which', then xor with `subkey'.  Round
   inputs are x0,x1,x2,x3 (destroyed), and round outputs are
   y0,y1,y2,y3. */
#define ROUND_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
  do { \
    LINEAR_TRANSFORMATION_INVERSE (x0,x1,x2,x3); \
    SBOX##which##_INVERSE(x0,x1,x2,x3, y0,y1,y2,y3); \
    KEYXOR(y0,y1,y2,y3, subkey); \
  } while (0)
436 #if HAVE_NATIVE_64_BIT
/* In-place inverse linear transformation, two-blocks-at-a-time variant:
   each 64-bit word holds two independent 32-bit lanes, processed with
   the DROTL32/DRSHIFT32 double-lane helpers.  Same step sequence as
   LINEAR_TRANSFORMATION_INVERSE. */
#define LINEAR_TRANSFORMATION64_INVERSE(x0,x1,x2,x3) \
  do { \
    x2 = DROTL32 (10, x2); \
    x0 = DROTL32 (27, x0); \
    x2 = x2 ^ x3 ^ DRSHIFT32(7, x1); \
    x0 = x0 ^ x1 ^ x3; \
    x3 = DROTL32 (25, x3); \
    x1 = DROTL32 (31, x1); \
    x3 = x3 ^ x2 ^ DRSHIFT32(3, x0); \
    x1 = x1 ^ x0 ^ x2; \
    x2 = DROTL32 (29, x2); \
    x0 = DROTL32 (19, x0); \
  } while (0)
/* One inverse round on two blocks packed into 64-bit words; the
   two-lane counterpart of ROUND_INVERSE.  Inputs x0..x3 are destroyed;
   outputs are y0..y3. */
#define ROUND64_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
  do { \
    LINEAR_TRANSFORMATION64_INVERSE (x0,x1,x2,x3); \
    SBOX##which##_INVERSE(x0,x1,x2,x3, y0,y1,y2,y3); \
    KEYXOR64(y0,y1,y2,y3, subkey); \
  } while (0)
460 #endif /* HAVE_NATIVE_64_BIT */
462 void
463 serpent_decrypt (const struct serpent_ctx *ctx,
464 unsigned length, uint8_t * dst, const uint8_t * src)
466 assert( !(length % SERPENT_BLOCK_SIZE));
468 #if HAVE_NATIVE_64_BIT
469 if (length & SERPENT_BLOCK_SIZE)
470 #else
471 while (length >= SERPENT_BLOCK_SIZE)
472 #endif
474 uint32_t x0,x1,x2,x3, y0,y1,y2,y3;
475 unsigned k;
477 x0 = LE_READ_UINT32 (src);
478 x1 = LE_READ_UINT32 (src + 4);
479 x2 = LE_READ_UINT32 (src + 8);
480 x3 = LE_READ_UINT32 (src + 12);
482 /* Inverse of special round */
483 KEYXOR (x0,x1,x2,x3, ctx->keys[32]);
484 SBOX7_INVERSE (x0,x1,x2,x3, y0,y1,y2,y3);
485 KEYXOR (y0,y1,y2,y3, ctx->keys[31]);
487 k = 24;
488 goto start32;
489 while (k > 0)
491 k -= 8;
492 ROUND_INVERSE (7, ctx->keys[k+7], x0,x1,x2,x3, y0,y1,y2,y3);
493 start32:
494 ROUND_INVERSE (6, ctx->keys[k+6], y0,y1,y2,y3, x0,x1,x2,x3);
495 ROUND_INVERSE (5, ctx->keys[k+5], x0,x1,x2,x3, y0,y1,y2,y3);
496 ROUND_INVERSE (4, ctx->keys[k+4], y0,y1,y2,y3, x0,x1,x2,x3);
497 ROUND_INVERSE (3, ctx->keys[k+3], x0,x1,x2,x3, y0,y1,y2,y3);
498 ROUND_INVERSE (2, ctx->keys[k+2], y0,y1,y2,y3, x0,x1,x2,x3);
499 ROUND_INVERSE (1, ctx->keys[k+1], x0,x1,x2,x3, y0,y1,y2,y3);
500 ROUND_INVERSE (0, ctx->keys[k], y0,y1,y2,y3, x0,x1,x2,x3);
503 LE_WRITE_UINT32 (dst, x0);
504 LE_WRITE_UINT32 (dst + 4, x1);
505 LE_WRITE_UINT32 (dst + 8, x2);
506 LE_WRITE_UINT32 (dst + 12, x3);
508 src += SERPENT_BLOCK_SIZE;
509 dst += SERPENT_BLOCK_SIZE;
510 length -= SERPENT_BLOCK_SIZE;
512 #if HAVE_NATIVE_64_BIT
513 FOR_BLOCKS(length, dst, src, 2*SERPENT_BLOCK_SIZE)
515 uint64_t x0,x1,x2,x3, y0,y1,y2,y3;
516 unsigned k;
518 x0 = LE_READ_UINT32 (src);
519 x1 = LE_READ_UINT32 (src + 4);
520 x2 = LE_READ_UINT32 (src + 8);
521 x3 = LE_READ_UINT32 (src + 12);
523 x0 <<= 32; x0 |= LE_READ_UINT32 (src + 16);
524 x1 <<= 32; x1 |= LE_READ_UINT32 (src + 20);
525 x2 <<= 32; x2 |= LE_READ_UINT32 (src + 24);
526 x3 <<= 32; x3 |= LE_READ_UINT32 (src + 28);
528 /* Inverse of special round */
529 KEYXOR64 (x0,x1,x2,x3, ctx->keys[32]);
530 SBOX7_INVERSE (x0,x1,x2,x3, y0,y1,y2,y3);
531 KEYXOR64 (y0,y1,y2,y3, ctx->keys[31]);
533 k = 24;
534 goto start64;
535 while (k > 0)
537 k -= 8;
538 ROUND64_INVERSE (7, ctx->keys[k+7], x0,x1,x2,x3, y0,y1,y2,y3);
539 start64:
540 ROUND64_INVERSE (6, ctx->keys[k+6], y0,y1,y2,y3, x0,x1,x2,x3);
541 ROUND64_INVERSE (5, ctx->keys[k+5], x0,x1,x2,x3, y0,y1,y2,y3);
542 ROUND64_INVERSE (4, ctx->keys[k+4], y0,y1,y2,y3, x0,x1,x2,x3);
543 ROUND64_INVERSE (3, ctx->keys[k+3], x0,x1,x2,x3, y0,y1,y2,y3);
544 ROUND64_INVERSE (2, ctx->keys[k+2], y0,y1,y2,y3, x0,x1,x2,x3);
545 ROUND64_INVERSE (1, ctx->keys[k+1], x0,x1,x2,x3, y0,y1,y2,y3);
546 ROUND64_INVERSE (0, ctx->keys[k], y0,y1,y2,y3, x0,x1,x2,x3);
549 LE_WRITE_UINT32 (dst + 16, x0);
550 LE_WRITE_UINT32 (dst + 20, x1);
551 LE_WRITE_UINT32 (dst + 24, x2);
552 LE_WRITE_UINT32 (dst + 28, x3);
553 x0 >>= 32; LE_WRITE_UINT32 (dst, x0);
554 x1 >>= 32; LE_WRITE_UINT32 (dst + 4, x1);
555 x2 >>= 32; LE_WRITE_UINT32 (dst + 8, x2);
556 x3 >>= 32; LE_WRITE_UINT32 (dst + 12, x3);
558 #endif /* HAVE_NATIVE_64_BIT */