/***************************************************************************
*   Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de>        *
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
*   This program is distributed in the hope that it will be useful,       *
*   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
*   GNU General Public License for more details.                          *
*                                                                         *
*   You should have received a copy of the GNU General Public License     *
*   along with this program; if not, write to the                         *
*   Free Software Foundation, Inc.,                                       *
*   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
***************************************************************************/
.file "twofish-i586-asm.S"
.text

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
/* return address at 0 */

#define in_blk    12	/* input byte array address parameter */
#define out_blk   8	/* output byte array address parameter */
#define ctx       4	/* Twofish context structure */

#define a_offset	0
#define b_offset	4
#define c_offset	8
#define d_offset	12
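
/*
 * The in_blk/out_blk/ctx defines above are the cdecl stack offsets at
 * function entry: the return address sits at 0(%esp), so the context
 * pointer, output block and input block are found at 4, 8 and 12 (plus 16
 * once the four callee-saved registers have been pushed), while
 * a_offset..d_offset index the four 32-bit words of the 16-byte block.
 * An assumed C-side view of the two entry points in this file; the
 * authoritative prototypes live in the C glue code, not here:
 *
 *	struct twofish_ctx;	// key schedule, laid out as described below
 *
 *	void twofish_enc_blk(struct twofish_ctx *ctx, unsigned char *dst,
 *			     const unsigned char *src);
 *	void twofish_dec_blk(struct twofish_ctx *ctx, unsigned char *dst,
 *			     const unsigned char *src);
 */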
/* Structure of the crypto context struct */

#define s0	0	/* S0 Array 256 Words each */
#define s1	1024	/* S1 Array */
#define s2	2048	/* S2 Array */
#define s3	3072	/* S3 Array */
#define w	4096	/* 8 whitening keys (word) */
#define k	4128	/* key 1-32 ( word ) */
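
/*
 * For reference, a minimal C sketch of a context layout that matches the
 * byte offsets above.  The real context is the kernel's struct twofish_ctx;
 * the name twofish_ctx_sketch and its field names are only illustrative
 * assumptions here.
 *
 *	#include <stdint.h>
 *
 *	struct twofish_ctx_sketch {
 *		uint32_t s[4][256];	// key-dependent S-box tables: offsets 0, 1024, 2048, 3072
 *		uint32_t w[8];		// whitening subkeys:           offset 4096
 *		uint32_t k[32];		// round subkeys, two per round: offset 4128
 *	};
 */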
/* define a few register aliases to allow macro substitution */

#define R0D    %eax
#define R1D    %ebx
#define R2D    %ecx
#define R3D    %edx
/* performs input whitening */
#define input_whitening(src,context,offset)\
	xor	w+offset(context),	src;

/* performs output whitening */
#define output_whitening(src,context,offset)\
	xor	w+16+offset(context),	src;
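
/*
 * Whitening XORs each 32-bit word of the block with one of the eight
 * whitening subkeys: w[0..3] on the way in, w[4..7] on the way out (hence
 * the extra +16 byte offset in output_whitening).  A hedged C equivalent,
 * with whiten() as an illustrative helper name:
 *
 *	#include <stdint.h>
 *
 *	static void whiten(uint32_t block[4], const uint32_t *wkeys)
 *	{
 *		// wkeys = &w[0] for input whitening, &w[4] for output whitening
 *		for (int i = 0; i < 4; i++)
 *			block[i] ^= wkeys[i];
 *	}
 */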
/*
 * a input register containing a (rotated 16)
 * b input register containing b
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 */
#define encrypt_round(a,b,c,d,round)\
	mov	s1(%ebp,%edi,4),d ## D;\
	mov	s2(%ebp,%edi,4),%esi;\
	xor	s2(%ebp,%edi,4),d ## D;\
	xor	s3(%ebp,%edi,4),%esi;\
	xor	s3(%ebp,%edi,4),d ## D;\
	xor	(%ebp,%edi,4),	%esi;\
	xor	(%ebp,%edi,4),	d ## D;\
	xor	s1(%ebp,%edi,4),%esi;\
	add	k+round(%ebp),	%esi;\
	add	k+4+round(%ebp),d ## D;
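
/*
 * For reference, one textbook Twofish encryption round in C.  This is only
 * a sketch of what the macro computes, not a transcription: the assembly
 * keeps a pre-rotated by 16 and d pre-rotated by 1 between rounds and
 * interleaves the two sets of S-box lookups for speed.  enc_round, rol32
 * and ror32 are illustrative names; the "round" macro argument is a byte
 * offset, so 8*r selects the subkey pair k[2r]/k[2r+1].
 *
 *	#include <stdint.h>
 *
 *	static uint32_t rol32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }
 *	static uint32_t ror32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }
 *
 *	// s: key-dependent S-box tables, k: the 32 round subkeys, r: 0..15
 *	static void enc_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
 *			      const uint32_t s[4][256], const uint32_t *k, int r)
 *	{
 *		uint32_t t0 = s[0][*a & 0xff] ^ s[1][(*a >> 8) & 0xff] ^
 *			      s[2][(*a >> 16) & 0xff] ^ s[3][*a >> 24];
 *		uint32_t t1 = s[1][*b & 0xff] ^ s[2][(*b >> 8) & 0xff] ^
 *			      s[3][(*b >> 16) & 0xff] ^ s[0][*b >> 24];
 *
 *		*c = ror32(*c ^ (t0 + t1 + k[2 * r]), 1);
 *		*d = rol32(*d, 1) ^ (t0 + 2 * t1 + k[2 * r + 1]);
 *	}
 *
 * The call sites in twofish_enc_blk below alternate the register order
 * (R0,R1,R2,R3 then R2,R3,R0,R1), which implements the swap of the two
 * block halves between rounds.
 */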
/*
 * a input register containing a (rotated 16)
 * b input register containing b
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 * last round has different rotations for the output preparation
 */
#define encrypt_last_round(a,b,c,d,round)\
	mov	s1(%ebp,%edi,4),d ## D;\
	mov	s2(%ebp,%edi,4),%esi;\
	xor	s2(%ebp,%edi,4),d ## D;\
	xor	s3(%ebp,%edi,4),%esi;\
	xor	s3(%ebp,%edi,4),d ## D;\
	xor	(%ebp,%edi,4),	%esi;\
	xor	(%ebp,%edi,4),	d ## D;\
	xor	s1(%ebp,%edi,4),%esi;\
	add	k+round(%ebp),	%esi;\
	add	k+4+round(%ebp),d ## D;
/*
 * a input register containing a
 * b input register containing b (rotated 16)
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 */
#define decrypt_round(a,b,c,d,round)\
	mov	(%ebp,%edi,4),	c ## D;\
	mov	s3(%ebp,%edi,4),%esi;\
	xor	s1(%ebp,%edi,4),c ## D;\
	xor	(%ebp,%edi,4),	%esi;\
	xor	s2(%ebp,%edi,4),c ## D;\
	xor	s1(%ebp,%edi,4),%esi;\
	xor	s3(%ebp,%edi,4),c ## D;\
	xor	s2(%ebp,%edi,4),%esi;\
	add	k+round(%ebp),	c ## D;\
	add	k+4+round(%ebp),%esi;
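
/*
 * The matching textbook decryption round undoes the rotations on either
 * side of the XOR while reusing the same two S-box sums; again a reference
 * sketch under the assumptions of the encryption sketch above (dec_round is
 * an illustrative name, rol32/ror32 as defined there), not a transcription
 * of the macro:
 *
 *	static void dec_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
 *			      const uint32_t s[4][256], const uint32_t *k, int r)
 *	{
 *		uint32_t t0 = s[0][*a & 0xff] ^ s[1][(*a >> 8) & 0xff] ^
 *			      s[2][(*a >> 16) & 0xff] ^ s[3][*a >> 24];
 *		uint32_t t1 = s[1][*b & 0xff] ^ s[2][(*b >> 8) & 0xff] ^
 *			      s[3][(*b >> 16) & 0xff] ^ s[0][*b >> 24];
 *
 *		*c = rol32(*c, 1) ^ (t0 + t1 + k[2 * r]);
 *		*d = ror32(*d ^ (t0 + 2 * t1 + k[2 * r + 1]), 1);
 *	}
 *
 * twofish_dec_blk below therefore walks the round subkeys backwards, from
 * offset 15*8 down to 0.
 */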
/*
 * a input register containing a
 * b input register containing b (rotated 16)
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 * last round has different rotations for the output preparation
 */
#define decrypt_last_round(a,b,c,d,round)\
	mov	(%ebp,%edi,4),	c ## D;\
	mov	s3(%ebp,%edi,4),%esi;\
	xor	s1(%ebp,%edi,4),c ## D;\
	xor	(%ebp,%edi,4),	%esi;\
	xor	s2(%ebp,%edi,4),c ## D;\
	xor	s1(%ebp,%edi,4),%esi;\
	xor	s3(%ebp,%edi,4),c ## D;\
	xor	s2(%ebp,%edi,4),%esi;\
	add	k+round(%ebp),	c ## D;\
	add	k+4+round(%ebp),%esi;
ENTRY(twofish_enc_blk)
	push	%ebp			/* save registers according to calling convention*/
	push	%ebx
	push	%esi
	push	%edi

	mov	ctx + 16(%esp),	%ebp	/* abuse the base pointer: set new base
					 * pointer to the ctx address */
	mov	in_blk+16(%esp),%edi	/* input address in edi */

	mov	a_offset(%edi),	%eax
	mov	b_offset(%edi),	%ebx
	mov	c_offset(%edi),	%ecx
	mov	d_offset(%edi),	%edx
	input_whitening(%eax,%ebp,a_offset)
	ror	$16,	%eax
	input_whitening(%ebx,%ebp,b_offset)
	input_whitening(%ecx,%ebp,c_offset)
	input_whitening(%edx,%ebp,d_offset)
	rol	$1,	%edx
	encrypt_round(R0,R1,R2,R3,0);
	encrypt_round(R2,R3,R0,R1,8);
	encrypt_round(R0,R1,R2,R3,2*8);
	encrypt_round(R2,R3,R0,R1,3*8);
	encrypt_round(R0,R1,R2,R3,4*8);
	encrypt_round(R2,R3,R0,R1,5*8);
	encrypt_round(R0,R1,R2,R3,6*8);
	encrypt_round(R2,R3,R0,R1,7*8);
	encrypt_round(R0,R1,R2,R3,8*8);
	encrypt_round(R2,R3,R0,R1,9*8);
	encrypt_round(R0,R1,R2,R3,10*8);
	encrypt_round(R2,R3,R0,R1,11*8);
	encrypt_round(R0,R1,R2,R3,12*8);
	encrypt_round(R2,R3,R0,R1,13*8);
	encrypt_round(R0,R1,R2,R3,14*8);
	encrypt_last_round(R2,R3,R0,R1,15*8);
	output_whitening(%eax,%ebp,c_offset)
	output_whitening(%ebx,%ebp,d_offset)
	output_whitening(%ecx,%ebp,a_offset)
	output_whitening(%edx,%ebp,b_offset)
	mov	out_blk+16(%esp),%edi;
	mov	%eax,	c_offset(%edi)
	mov	%ebx,	d_offset(%edi)
	mov	%ecx,	a_offset(%edi)
	mov	%edx,	b_offset(%edi)

	pop	%edi
	pop	%esi
	pop	%ebx
	pop	%ebp
	mov	$1,	%eax
	ret
ENDPROC(twofish_enc_blk)
ENTRY(twofish_dec_blk)
	push	%ebp			/* save registers according to calling convention*/
	push	%ebx
	push	%esi
	push	%edi

	mov	ctx + 16(%esp),	%ebp	/* abuse the base pointer: set new base
					 * pointer to the ctx address */
	mov	in_blk+16(%esp),%edi	/* input address in edi */

	mov	a_offset(%edi),	%eax
	mov	b_offset(%edi),	%ebx
	mov	c_offset(%edi),	%ecx
	mov	d_offset(%edi),	%edx
	output_whitening(%eax,%ebp,a_offset)
	output_whitening(%ebx,%ebp,b_offset)
	ror	$16,	%ebx
	output_whitening(%ecx,%ebp,c_offset)
	output_whitening(%edx,%ebp,d_offset)
	rol	$1,	%edx
	decrypt_round(R0,R1,R2,R3,15*8);
	decrypt_round(R2,R3,R0,R1,14*8);
	decrypt_round(R0,R1,R2,R3,13*8);
	decrypt_round(R2,R3,R0,R1,12*8);
	decrypt_round(R0,R1,R2,R3,11*8);
	decrypt_round(R2,R3,R0,R1,10*8);
	decrypt_round(R0,R1,R2,R3,9*8);
	decrypt_round(R2,R3,R0,R1,8*8);
	decrypt_round(R0,R1,R2,R3,7*8);
	decrypt_round(R2,R3,R0,R1,6*8);
	decrypt_round(R0,R1,R2,R3,5*8);
	decrypt_round(R2,R3,R0,R1,4*8);
	decrypt_round(R0,R1,R2,R3,3*8);
	decrypt_round(R2,R3,R0,R1,2*8);
	decrypt_round(R0,R1,R2,R3,1*8);
	decrypt_last_round(R2,R3,R0,R1,0);
	input_whitening(%eax,%ebp,c_offset)
	input_whitening(%ebx,%ebp,d_offset)
	input_whitening(%ecx,%ebp,a_offset)
	input_whitening(%edx,%ebp,b_offset)
	mov	out_blk+16(%esp),%edi;
	mov	%eax,	c_offset(%edi)
	mov	%ebx,	d_offset(%edi)
	mov	%ecx,	a_offset(%edi)
	mov	%edx,	b_offset(%edi)

	pop	%edi
	pop	%esi
	pop	%ebx
	pop	%ebp
	mov	$1,	%eax
	ret
ENDPROC(twofish_dec_blk)
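
/*
 * Rough usage sketch: both entry points process a single 16-byte block and
 * expect a fully expanded key schedule in the context.  In the kernel they
 * are reached through the Twofish glue code after the common C setkey
 * routine has filled the context; the snippet below only illustrates the
 * assumed data flow and is not the real glue code.
 *
 *	unsigned char in[16], out[16], back[16];
 *	struct twofish_ctx *ctx = ...;		// prepared by the C key setup
 *
 *	twofish_enc_blk(ctx, out, in);		// out  = E_K(in)
 *	twofish_dec_blk(ctx, back, out);	// back = D_K(out) == in
 */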