/***************************************************************************
* Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
.file "twofish-i586-asm.S"
.text

#include <asm/asm-offsets.h>

/* return address at 0 */
#define in_blk    12  /* input byte array address parameter */
#define out_blk   8   /* output byte array address parameter */
#define tfm       4   /* Twofish context structure */
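/*
 * The offsets above assume the usual cdecl layout for a call of the form
 * twofish_enc_blk(tfm, out_blk, in_blk): return address at 0(%esp), then
 * the three arguments, before any callee-saved registers are pushed.
 */

/* byte offsets of the four 32-bit words of the 16-byte block, as used by
   the load/store code below */
#define a_offset	0
#define b_offset	4
#define c_offset	8
#define d_offset	12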
/* Structure of the crypto context struct */

#define s0	0	/* S0 Array 256 Words each */
#define s1	1024	/* S1 Array */
#define s2	2048	/* S2 Array */
#define s3	3072	/* S3 Array */
#define w	4096	/* 8 whitening keys (word) */
#define k	4128	/* key 1-32 (word) */
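/*
 * For reference, these offsets correspond to a context layout of roughly
 * this shape (field names are illustrative, not necessarily the kernel's):
 *
 *	u32 s[4][256];	four key-dependent S-box tables (s0..s3)
 *	u32 w[8];	pre-/post-whitening subkeys
 *	u32 k[32];	round subkeys
 */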
/* define a few register aliases to allow macro substitution */
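/*
 * R0..R3 stand for %eax, %ebx, %ecx and %edx (matching the hard-coded
 * registers used by the load/store and whitening code below); the D, B
 * and H suffixes select the 32-bit, low-byte and high-byte forms of each
 * register (the round macros build register names with the ## operator).
 */
#define R0D    %eax
#define R0B    %al
#define R0H    %ah

#define R1D    %ebx
#define R1B    %bl
#define R1H    %bh

#define R2D    %ecx
#define R2B    %cl
#define R2H    %ch

#define R3D    %edx
#define R3B    %dl
#define R3H    %dh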
/* performs input whitening */
#define input_whitening(src,context,offset)\
	xor	w+offset(context),	src;

/* performs output whitening */
#define output_whitening(src,context,offset)\
	xor	w+16+offset(context),	src;
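/*
 * Input whitening XORs the first four whitening words (offsets w+0..w+12)
 * into the block; output whitening XORs the remaining four (w+16..w+28).
 * Together they cover the eight whitening subkeys stored at offset w.
 */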
/*
 * a input register containing a (rotated 16)
 * b input register containing b
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 */
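/*
 * Each round computes the two Twofish g functions as four lookups each
 * into the key-dependent tables s0..s3, indexed by the individual bytes
 * of a and b, and folds in the round's two subkeys at k+round and
 * k+4+round; in the full round function the results are combined with a
 * pseudo-Hadamard transform and mixed into c and d with one-bit rotates.
 */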
#define encrypt_round(a,b,c,d,round)\
	mov	s1(%ebp,%edi,4),d ## D;\
	mov	s2(%ebp,%edi,4),%esi;\
	xor	s2(%ebp,%edi,4),d ## D;\
	xor	s3(%ebp,%edi,4),%esi;\
	xor	s3(%ebp,%edi,4),d ## D;\
	xor	(%ebp,%edi,4),	%esi;\
	xor	(%ebp,%edi,4),	d ## D;\
	xor	s1(%ebp,%edi,4),%esi;\
	add	k+round(%ebp),	%esi;\
	add	k+4+round(%ebp),d ## D;\
/*
 * a input register containing a (rotated 16)
 * b input register containing b
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 * last round has different rotations for the output preparation
 */
#define encrypt_last_round(a,b,c,d,round)\
	mov	s1(%ebp,%edi,4),d ## D;\
	mov	s2(%ebp,%edi,4),%esi;\
	xor	s2(%ebp,%edi,4),d ## D;\
	xor	s3(%ebp,%edi,4),%esi;\
	xor	s3(%ebp,%edi,4),d ## D;\
	xor	(%ebp,%edi,4),	%esi;\
	xor	(%ebp,%edi,4),	d ## D;\
	xor	s1(%ebp,%edi,4),%esi;\
	add	k+round(%ebp),	%esi;\
	add	k+4+round(%ebp),d ## D;\
/*
 * a input register containing a
 * b input register containing b (rotated 16)
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 */
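/*
 * The decrypt rounds mirror the encrypt rounds: the same S-box tables are
 * used, the subkey pairs at k+round/k+4+round are consumed from round 15
 * down to round 0, and the one-bit rotates are applied in the opposite
 * direction.
 */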
#define decrypt_round(a,b,c,d,round)\
	mov	(%ebp,%edi,4),	c ## D;\
	mov	s3(%ebp,%edi,4),%esi;\
	xor	s1(%ebp,%edi,4),c ## D;\
	xor	(%ebp,%edi,4),	%esi;\
	xor	s2(%ebp,%edi,4),c ## D;\
	xor	s1(%ebp,%edi,4),%esi;\
	xor	s3(%ebp,%edi,4),c ## D;\
	xor	s2(%ebp,%edi,4),%esi;\
	add	k+round(%ebp),	c ## D;\
	add	k+4+round(%ebp),%esi;\
/*
 * a input register containing a
 * b input register containing b (rotated 16)
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 * last round has different rotations for the output preparation
 */
#define decrypt_last_round(a,b,c,d,round)\
	mov	(%ebp,%edi,4),	c ## D;\
	mov	s3(%ebp,%edi,4),%esi;\
	xor	s1(%ebp,%edi,4),c ## D;\
	xor	(%ebp,%edi,4),	%esi;\
	xor	s2(%ebp,%edi,4),c ## D;\
	xor	s1(%ebp,%edi,4),%esi;\
	xor	s3(%ebp,%edi,4),c ## D;\
	xor	s2(%ebp,%edi,4),%esi;\
	add	k+round(%ebp),	c ## D;\
	add	k+4+round(%ebp),%esi;\
.global twofish_enc_blk
.global twofish_dec_blk
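/*
 * Register use below: %ebp is repurposed as the context (S-box/subkey)
 * base pointer, %edi holds the block address on entry and is then reused
 * as the byte-index scratch register inside the rounds, and %esi serves
 * as a second scratch accumulator; all are saved and restored around the
 * function bodies.
 */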
twofish_enc_blk:
	push	%ebp			/* save registers according to calling convention */
	push	%ebx
	push	%esi
	push	%edi

	mov	tfm + 16(%esp),	%ebp	/* abuse the base pointer: set new base pointer to the crypto tfm */
	add	$crypto_tfm_ctx_offset, %ebp	/* ctx address */
	mov	in_blk+16(%esp), %edi	/* input address in %edi */
	mov	a_offset(%edi), %eax
	mov	b_offset(%edi), %ebx
	mov	c_offset(%edi), %ecx
	mov	d_offset(%edi), %edx

	input_whitening(%eax,%ebp,a_offset)
	input_whitening(%ebx,%ebp,b_offset)
	input_whitening(%ecx,%ebp,c_offset)
	input_whitening(%edx,%ebp,d_offset)
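	/*
	 * The alternating argument order (R0,R1,R2,R3 vs. R2,R3,R0,R1)
	 * implements the swap of the two halves between rounds; the last
	 * argument is the byte offset of the round's subkey pair in k
	 * (two 32-bit subkeys, i.e. 8 bytes, per round).
	 */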
	encrypt_round(R0,R1,R2,R3,0);
	encrypt_round(R2,R3,R0,R1,8);
	encrypt_round(R0,R1,R2,R3,2*8);
	encrypt_round(R2,R3,R0,R1,3*8);
	encrypt_round(R0,R1,R2,R3,4*8);
	encrypt_round(R2,R3,R0,R1,5*8);
	encrypt_round(R0,R1,R2,R3,6*8);
	encrypt_round(R2,R3,R0,R1,7*8);
	encrypt_round(R0,R1,R2,R3,8*8);
	encrypt_round(R2,R3,R0,R1,9*8);
	encrypt_round(R0,R1,R2,R3,10*8);
	encrypt_round(R2,R3,R0,R1,11*8);
	encrypt_round(R0,R1,R2,R3,12*8);
	encrypt_round(R2,R3,R0,R1,13*8);
	encrypt_round(R0,R1,R2,R3,14*8);
	encrypt_last_round(R2,R3,R0,R1,15*8);
	output_whitening(%eax,%ebp,c_offset)
	output_whitening(%ebx,%ebp,d_offset)
	output_whitening(%ecx,%ebp,a_offset)
	output_whitening(%edx,%ebp,b_offset)

	mov	out_blk+16(%esp), %edi
	mov	%eax, c_offset(%edi)
	mov	%ebx, d_offset(%edi)
	mov	%ecx, a_offset(%edi)
	mov	%edx, b_offset(%edi)

	pop	%edi
	pop	%esi
	pop	%ebx
	pop	%ebp
	ret
twofish_dec_blk:
	push	%ebp			/* save registers according to calling convention */
	push	%ebx
	push	%esi
	push	%edi

	mov	tfm + 16(%esp),	%ebp	/* abuse the base pointer: set new base pointer to the crypto tfm */
	add	$crypto_tfm_ctx_offset, %ebp	/* ctx address */
	mov	in_blk+16(%esp), %edi	/* input address in %edi */
	mov	a_offset(%edi), %eax
	mov	b_offset(%edi), %ebx
	mov	c_offset(%edi), %ecx
	mov	d_offset(%edi), %edx

	output_whitening(%eax,%ebp,a_offset)
	output_whitening(%ebx,%ebp,b_offset)
	output_whitening(%ecx,%ebp,c_offset)
	output_whitening(%edx,%ebp,d_offset)
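	/* same round structure as encryption, but with the subkey offsets
	   walked from 15*8 down to 0 */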
	decrypt_round(R0,R1,R2,R3,15*8);
	decrypt_round(R2,R3,R0,R1,14*8);
	decrypt_round(R0,R1,R2,R3,13*8);
	decrypt_round(R2,R3,R0,R1,12*8);
	decrypt_round(R0,R1,R2,R3,11*8);
	decrypt_round(R2,R3,R0,R1,10*8);
	decrypt_round(R0,R1,R2,R3,9*8);
	decrypt_round(R2,R3,R0,R1,8*8);
	decrypt_round(R0,R1,R2,R3,7*8);
	decrypt_round(R2,R3,R0,R1,6*8);
	decrypt_round(R0,R1,R2,R3,5*8);
	decrypt_round(R2,R3,R0,R1,4*8);
	decrypt_round(R0,R1,R2,R3,3*8);
	decrypt_round(R2,R3,R0,R1,2*8);
	decrypt_round(R0,R1,R2,R3,1*8);
	decrypt_last_round(R2,R3,R0,R1,0);
	input_whitening(%eax,%ebp,c_offset)
	input_whitening(%ebx,%ebp,d_offset)
	input_whitening(%ecx,%ebp,a_offset)
	input_whitening(%edx,%ebp,b_offset)

	mov	out_blk+16(%esp), %edi
	mov	%eax, c_offset(%edi)
	mov	%ebx, d_offset(%edi)
	mov	%ecx, a_offset(%edi)
	mov	%edx, b_offset(%edi)

	pop	%edi
	pop	%esi
	pop	%ebx
	pop	%ebp
	ret