/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */
/*
 * This is a generic 32-bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks of
 * 32-bit values and invokes the block function that performs the actual
 * hash calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *	this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *	size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *	has to be at least 32 bits wide; if it's wider, then
 *	HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *	context structure that at least contains the following
 *	members:
 *		typedef struct {
 *			...
 *			HASH_LONG	Nl,Nh;
 *			HASH_LONG	data[HASH_LBLOCK];
 *			unsigned int	num;
 *			...
 *			} HASH_CTX;
 * HASH_UPDATE
 *	name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *	name of "Transform" function, implemented here.
 * HASH_FINAL
 *	name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *	name of "block" function treating *aligned* input message
 *	in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *	name of "block" function treating *unaligned* input message
 *	in original (data) byte order, implemented externally (it
 *	actually is optional if data and host are of the same
 *	"endianness").
 * HASH_MAKE_STRING
 *	macro converting context variables to the hash output string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *	defines host byte order.
 * HASH_LONG_LOG2
 *	defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *	alternative "block" function capable of treating
 *	aligned input message in original (data) order,
 *	implemented externally.
 *
 * MD5 example:
 *
 *	#define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *	#define HASH_LONG		MD5_LONG
 *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
 *	#define HASH_CTX		MD5_CTX
 *	#define HASH_CBLOCK		MD5_CBLOCK
 *	#define HASH_LBLOCK		MD5_LBLOCK
 *	#define HASH_UPDATE		MD5_Update
 *	#define HASH_TRANSFORM		MD5_Transform
 *	#define HASH_FINAL		MD5_Final
 *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
 *
 *					<appro@fy.chalmers.se>
 */
#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK	(HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2	2
#endif
#define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
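/*
 * The 0xffffffff mask above matters when HASH_LONG is wider than 32 bits
 * (see the porting guide): it strips bits 32 and up before the right shift,
 * so the low 32 bits of the result are still a correct 32-bit rotate even
 * if the upper half of a 64-bit register holds garbage.
 */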
/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *				<appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
#  if defined(DATA_ORDER_IS_BIG_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#elif defined(L_ENDIAN)
#  if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#endif

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif
#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && \
     !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, first macro to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#   define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   (c)+=4; (l)=r;			})
#   define HOST_l2c(l,c)	({ unsigned int r=(l);			\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   *((unsigned int *)(c))=r; (c)+=4; r;	})
#  endif
# endif
#endif
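/*
 * The byte-by-byte fallbacks below are wrapped in #ifndef so that the
 * inline-assembler versions above, when defined, take precedence; the
 * HOST_p_c2l* helpers have no assembler counterpart and are always
 * defined the portable way.
 */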
#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))    ),		\
			 l)
#endif
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 3: l|=((unsigned long)(*((c)++)));		\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)
#endif
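/*
 * Worked example (big-endian data order): for input bytes
 * {0x01,0x02,0x03,0x04}, HOST_c2l assembles l=0x01020304 and advances the
 * byte pointer by four; HOST_l2c performs the inverse store.
 */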
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# ifndef B_ENDIAN
   /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#  define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
#  define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<<24),		\
			 l)
#endif
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<<16;	\
			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
			case 1: l|=((unsigned long)(*(--(c))));		\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 l)
#endif

#endif
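/*
 * Worked example (little-endian data order): the same input bytes
 * {0x01,0x02,0x03,0x04} assemble to l=0x04030201, i.e. the first byte of
 * the stream becomes the least significant byte of the word.
 */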
/*
 * Time for some action:-)
 */
int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
	{
	const unsigned char *data=data_;
	register HASH_LONG * p;
	register HASH_LONG l;
	size_t sw,sc,ew,ec;

	if (len==0) return 1;

	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh+=(unsigned int)(len>>29);	/* might cause compiler warning on 16-bit */
	c->Nl=l;

	if (c->num != 0)
		{
		/* first finish off the partial block buffered in the context */
		p=c->data;
		sw=c->num>>2;
		sc=c->num&0x03;

		if ((c->num+len) >= HASH_CBLOCK)
			{
			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
			for (; sw<HASH_LBLOCK; sw++)
				{
				HOST_c2l(data,l); p[sw]=l;
				}
			HASH_BLOCK_HOST_ORDER (c,p,1);
			len-=(HASH_CBLOCK-c->num);
			c->num=0;
			/* drop through and do the rest */
			}
		else
			{
			c->num+=(unsigned int)len;
			if ((sc+len) < 4) /* ugly, add char's to a word */
				{
				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
				}
			else
				{
				ew=(c->num>>2);
				ec=(c->num&0x03);
				if (sc)
					l=p[sw];
				HOST_p_c2l(data,l,sc);
				p[sw++]=l;
				for (; sw < ew; sw++)
					{
					HOST_c2l(data,l); p[sw]=l;
					}
				if (ec)
					{
					HOST_c2l_p(data,l,ec); p[sw]=l;
					}
				}
			return 1;
			}
		}

	/* process as many complete blocks as possible straight from the input */
	sw=len/HASH_CBLOCK;
	if (sw > 0)
		{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
		/*
		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
		 * only if sizeof(HASH_LONG)==4.
		 */
		if ((((size_t)data)%4) == 0)
			{
			/* data is properly aligned so that we can cast it: */
			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
		else
#if !defined(HASH_BLOCK_DATA_ORDER)
			while (sw--)
				{
				memcpy (p=c->data,data,HASH_CBLOCK);
				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
				data+=HASH_CBLOCK;
				len-=HASH_CBLOCK;
				}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
			{
			HASH_BLOCK_DATA_ORDER(c,data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
#endif
		}

	if (len!=0)
		{
		/* stash the remaining tail bytes in the context buffer */
		p = c->data;
		c->num = (unsigned int)len;
		ew=len>>2;	/* words to copy */
		ec=len&0x03;
		for (; ew; ew--,p++)
			{
			HOST_c2l(data,l); *p=l;
			}
		HOST_c2l_p(data,l,ec);
		*p=l;
		}
	return 1;
	}
void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
	{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
	if ((((size_t)data)%4) == 0)
		/* data is properly aligned so that we can cast it: */
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
	else
#if !defined(HASH_BLOCK_DATA_ORDER)
		{
		memcpy (c->data,data,HASH_CBLOCK);
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
		}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
	HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
	}
int HASH_FINAL (unsigned char *md, HASH_CTX *c)
	{
	register HASH_LONG *p;
	register unsigned long l;
	register int i,j;
	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
	const unsigned char *cp=end;

	/* c->num should definitely have room for at least one more byte. */
	p=c->data;
	i=c->num>>2;
	j=c->num&0x03;

	l = (j==0) ? 0 : p[i];
	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
		{
		if (i<HASH_LBLOCK) p[i]=0;
		HASH_BLOCK_HOST_ORDER (c,p,1);
		i=0;
		}
	for (; i<(HASH_LBLOCK-2); i++)
		p[i]=0;

#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nh;
	p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nl;
	p[HASH_LBLOCK-1]=c->Nh;
#endif
	HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
	HASH_MAKE_STRING(c,md);
#endif

	c->num=0;
	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
	 * but I'm not worried :-)
	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
	 */
	return 1;
	}
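/*
 * Illustrative sketch (assumption, not part of this header): for MD5 the
 * including header would define HASH_MAKE_STRING roughly as
 *
 *	#define HASH_MAKE_STRING(c,s)	do {	\
 *		unsigned long ll;		\
 *		ll=(c)->A; HOST_l2c(ll,(s));	\
 *		ll=(c)->B; HOST_l2c(ll,(s));	\
 *		ll=(c)->C; HOST_l2c(ll,(s));	\
 *		ll=(c)->D; HOST_l2c(ll,(s));	\
 *		} while (0)
 *
 * i.e. it serializes the chaining variables into the output buffer md
 * using the HOST_l2c appropriate for the digest's byte order.
 */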
#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not as
 * MD5_LONG: doing so results in a slight performance boost on
 * LP64 architectures. The catch is that we don't really care if
 * the 32 MSBs of a 64-bit register get polluted with eventual
 * overflows, as we *save* only the 32 LSBs in *either* case.
 * Declaring them long excuses the compiler from keeping the 32
 * MSBs zeroed, resulting in a 13% performance improvement under
 * SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest, it should say that this *prevents*
 * performance degradation.
 *				<appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *				<appro@fy.chalmers.se>