1 /* md.c - message digest dispatcher
2 * Copyright (C) 1998, 1999, 2002, 2003 Free Software Foundation, Inc.
4 * This file is part of Libgcrypt.
6 * Libgcrypt is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU Lesser General Public License as
8 * published by the Free Software Foundation; either version 2.1 of
9 * the License, or (at your option) any later version.
11 * Libgcrypt is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
35 const char *oidstring
;
38 /* iso.member-body.us.rsadsi.pkcs.pkcs-1.5 (sha1WithRSAEncryption) */
39 { "1.2.840.113549.1.1.5", GCRY_MD_SHA1
},
40 /* iso.member-body.us.rsadsi.pkcs.pkcs-1.4 (md5WithRSAEncryption) */
41 { "1.2.840.113549.1.1.4", GCRY_MD_MD5
},
42 /* iso.member-body.us.x9-57.x9cm.3 (dsaWithSha1)*/
43 { "1.2.840.10040.4.3", GCRY_MD_SHA1
},
44 /* from NIST's OIW (sha1) */
45 { "1.3.14.3.2.26", GCRY_MD_SHA1
},
46 /* rsaSignatureWithripemd160 */
47 { "1.3.36.3.3.1.2", GCRY_MD_RMD160
},
48 /* RSADSI digestAlgorithm MD5 */
49 { "1.2.840.113549.2.5", GCRY_MD_MD5
},
50 /* GNU.digestAlgorithm TIGER */
51 { "1.3.6.1.4.1.11591.12.2", GCRY_MD_TIGER
},
52 /* iso.member-body.us.rsadsi.digestAlgorithm.md4 */
53 { "1.2.840.113549.2.4", GCRY_MD_MD4
},
54 /* from NIST OIW (sha-1WithRSAEncryption) */
55 { "1.3.14.3.2.29", GCRY_MD_SHA1
},
56 /* According to the OpenPGG draft rfc2440-bis06 */
57 { "2.16.840.1.101.3.4.2.1", GCRY_MD_SHA256
},
58 { "2.16.840.1.101.3.4.2.2", GCRY_MD_SHA384
},
59 { "2.16.840.1.101.3.4.2.3", GCRY_MD_SHA512
},
/* Forward declaration; the full definition appears further below. */
struct md_digest_list_s;
68 /* this structure is put right after the GCRY_MD_HD buffer, so that
69 * only one memory block is needed. */
70 struct gcry_md_context
{
75 struct md_digest_list_s
*list
;
78 #define CTX_MAGIC_NORMAL 0x11071961
79 #define CTX_MAGIC_SECURE 0x16917011
81 static const char * digest_algo_to_string( int algo
);
82 static int check_digest_algo( int algo
);
83 static GCRY_MD_HD
md_open( int algo
, int secure
, int hmac
);
84 static int md_enable( GCRY_MD_HD hd
, int algo
);
85 static GCRY_MD_HD
md_copy( GCRY_MD_HD a
);
86 static void md_close(GCRY_MD_HD a
);
87 static void md_write( GCRY_MD_HD a
, byte
*inbuf
, size_t inlen
);
88 static void md_final(GCRY_MD_HD a
);
89 static byte
*md_read( GCRY_MD_HD a
, int algo
);
90 static int md_get_algo( GCRY_MD_HD a
);
91 static int md_digest_length( int algo
);
92 static const byte
*md_asn_oid( int algo
, size_t *asnlen
, size_t *mdlen
);
93 static void md_start_debug( GCRY_MD_HD a
, const char *suffix
);
94 static void md_stop_debug( GCRY_MD_HD a
);
97 * This structure is used for the list of available algorithms
98 * and for the list of algorithms in GCRY_MD_HD.
100 struct md_digest_list_s
{
101 struct md_digest_list_s
*next
;
107 void (*init
)( void *c
);
108 void (*write
)( void *c
, byte
*buf
, size_t nbytes
);
109 void (*final
)( void *c
);
110 byte
*(*read
)( void *c
);
111 size_t contextsize
; /* allocate this amount of context */
112 PROPERLY_ALIGNED_TYPE context
;
/* Head of the global list of all loaded digest algorithms. */
static struct md_digest_list_s *digest_list;
119 static struct md_digest_list_s
*
120 new_list_item( int algo
,
121 const char *(*get_info
)( int, size_t*,byte
**, int*, int*,
123 void (**)(void*,byte
*,size_t),
124 void (**)(void*),byte
*(**)(void*)) )
126 struct md_digest_list_s
*r
;
128 r
= gcry_xcalloc( 1, sizeof *r
);
130 r
->name
= (*get_info
)( algo
, &r
->contextsize
,
131 &r
->asnoid
, &r
->asnlen
, &r
->mdlen
,
132 &r
->init
, &r
->write
, &r
->final
, &r
->read
);
143 * Try to load the modules with the requested algorithm
144 * and return true if new modules are available
145 * If req_alog is -1 try to load all digest algorithms.
148 load_digest_module( int req_algo
)
150 static int initialized
= 0;
151 static u32 checked_algos
[512/32];
152 static int checked_all
= 0;
153 struct md_digest_list_s
*r
;
154 void *context
= NULL
;
157 const char *(*get_info
)( int, size_t*,byte
**, int*, int*,
159 void (**)(void*,byte
*,size_t),
160 void (**)(void*),byte
*(**)(void*));
163 _gcry_cipher_modules_constructor();
167 if( algo
> 511 || !algo
)
168 return 0; /* algorithm number too high (does not fit into out bitmap)*/
170 return 0; /* already called with -1 */
173 else if( (checked_algos
[algo
/32] & (1 << (algo
%32))) )
174 return 0; /* already checked and not found */
176 checked_algos
[algo
/32] |= (1 << (algo
%32));
178 while( _gcry_enum_gnupgext_digests( &context
, &algo
, &get_info
) ) {
179 if( req_algo
!= -1 && algo
!= req_algo
)
181 for(r
=digest_list
; r
; r
= r
->next
)
182 if( r
->algo
== algo
)
185 log_info("skipping digest %d: already loaded\n", algo
);
188 r
= new_list_item( algo
, get_info
);
190 log_info("skipping digest %d: no name\n", algo
);
193 /* put it into the list */
194 if( _gcry_log_verbosity( 2 ) )
195 log_info("loaded digest %d\n", algo
);
196 r
->next
= digest_list
;
202 _gcry_enum_gnupgext_digests( &context
, NULL
, NULL
);
209 * Map a string to the digest algo
212 gcry_md_map_name( const char *string
)
214 struct md_digest_list_s
*r
;
219 /* If the string starts with a digit (optionally prefixed with
220 either "OID." or "oid."), we first look into our table of ASN.1
221 object identifiers to figure out the algorithm */
223 || !strncmp (string
, "oid.", 4)
224 || !strncmp (string
, "OID.", 4) )
227 const char *s
= digitp(string
)? string
: (string
+4);
229 for (i
=0; oid_table
[i
].oidstring
; i
++)
231 if (!strcmp (s
, oid_table
[i
].oidstring
))
232 return oid_table
[i
].algo
;
237 for(r
= digest_list
; r
; r
= r
->next
)
238 if( !stricmp( r
->name
, string
) )
240 } while( !r
&& load_digest_module(-1) );
246 * Map a digest algo to a string
249 digest_algo_to_string( int algo
)
251 struct md_digest_list_s
*r
;
254 for(r
= digest_list
; r
; r
= r
->next
)
255 if( r
->algo
== algo
)
257 } while( !r
&& load_digest_module( algo
) );
/****************
 * This function simply returns the name of the algorithm or some constant
 * string when there is no algo.  It will never return NULL.
 * Use the macro gcry_md_test_algo() to check whether the algorithm
 * is valid.
 * NOTE(review): the fallback constant was elided in extraction; "?" is
 * assumed -- confirm against upstream. */
const char *
gcry_md_algo_name( int algo )
{
    const char *s = digest_algo_to_string( algo );
    return s ? s : "?";
}
276 check_digest_algo( int algo
)
278 struct md_digest_list_s
*r
;
281 for(r
= digest_list
; r
; r
= r
->next
)
282 if( r
->algo
== algo
)
284 } while( !r
&& load_digest_module(algo
) );
285 return GCRYERR_INV_MD_ALGO
;
291 * Open a message digest handle for use with algorithm ALGO.
292 * More algorithms may be added by md_enable(). The initial algorithm
296 md_open( int algo
, int secure
, int hmac
)
299 struct gcry_md_context
*ctx
;
300 int bufsize
= secure
? 512 : 1024;
303 /* Allocate a memory area to hold the caller visible buffer with it's
304 * control information and the data required by this module. Set the
305 * context pointer at the beginning to this area.
306 * We have to use this strange scheme because we want to hide the
307 * internal data but have a variable sized buffer.
309 * +---+------+---........------+-------------+
310 * !ctx! bctl ! buffer ! private !
311 * +---+------+---........------+-------------+
313 * !---------------------------!
315 * We have to make sture that private is well aligned.
317 n
= sizeof( struct gcry_md_handle
) + bufsize
;
318 n
= ((n
+ sizeof(PROPERLY_ALIGNED_TYPE
)-1)
319 / sizeof(PROPERLY_ALIGNED_TYPE
) ) * sizeof(PROPERLY_ALIGNED_TYPE
);
321 /* allocate and set the Context pointer to the private data */
322 hd
= secure
? gcry_malloc_secure( n
+ sizeof( struct gcry_md_context
) )
323 : gcry_malloc( n
+ sizeof( struct gcry_md_context
) );
325 set_lasterr( GCRYERR_NO_MEM
);
329 hd
->ctx
= ctx
= (struct gcry_md_context
*)( (char*)hd
+ n
);
330 /* setup the globally visible data (bctl in the diagram)*/
331 hd
->bufsize
= n
- sizeof( struct gcry_md_handle
) + 1;
333 /* initialize the private data */
334 memset( hd
->ctx
, 0, sizeof *hd
->ctx
);
335 ctx
->magic
= secure
? CTX_MAGIC_SECURE
: CTX_MAGIC_NORMAL
;
336 ctx
->secure
= secure
;
338 ctx
->macpads
= gcry_malloc_secure( 128 );
339 if( !ctx
->macpads
) {
341 set_lasterr( GCRYERR_NO_MEM
);
345 fast_random_poll(); /* FIXME: should we really do that? */
346 if( algo
&& md_enable( hd
, algo
) ) {
354 /* Create a message digest object for algorithm ALGO. FLAGS may be
355 given as an bitwise OR of the gcry_md_flags values. ALGO may be
356 given as 0 if the algorithms to be used are later set using
359 gcry_md_open (int algo
, unsigned int flags
)
363 if ((flags
& ~(GCRY_MD_FLAG_SECURE
| GCRY_MD_FLAG_HMAC
)))
365 set_lasterr (GCRYERR_INV_ARG
);
368 hd
= md_open (algo
, (flags
& GCRY_MD_FLAG_SECURE
),
369 (flags
& GCRY_MD_FLAG_HMAC
));
376 md_enable( GCRY_MD_HD hd
, int algo
)
378 struct gcry_md_context
*h
= hd
->ctx
;
379 struct md_digest_list_s
*r
, *ac
;
381 for( ac
=h
->list
; ac
; ac
= ac
->next
)
382 if( ac
->algo
== algo
)
383 return 0; /* already enabled */
384 /* find the algorithm */
386 for(r
= digest_list
; r
; r
= r
->next
)
387 if( r
->algo
== algo
)
389 } while( !r
&& load_digest_module( algo
) );
391 log_debug("md_enable: algorithm %d not available\n", algo
);
392 return set_lasterr( GCRYERR_INV_MD_ALGO
);
394 /* and allocate a new list entry */
395 ac
= h
->secure
? gcry_malloc_secure( sizeof *ac
+ r
->contextsize
396 - sizeof(r
->context
) )
397 : gcry_malloc( sizeof *ac
+ r
->contextsize
398 - sizeof(r
->context
) );
400 return set_lasterr( GCRYERR_NO_MEM
);
405 /* and init this instance */
406 (*ac
->init
)( &ac
->context
.c
);
412 gcry_md_enable( GCRY_MD_HD hd
, int algo
)
414 return md_enable( hd
, algo
);
418 md_copy( GCRY_MD_HD ahd
)
420 struct gcry_md_context
*a
= ahd
->ctx
;
421 struct gcry_md_context
*b
;
423 struct md_digest_list_s
*ar
, *br
;
427 md_write( ahd
, NULL
, 0 );
429 n
= (char*)ahd
->ctx
- (char*)ahd
;
430 bhd
= a
->secure
? gcry_malloc_secure( n
+ sizeof( struct gcry_md_context
) )
431 : gcry_malloc( n
+ sizeof( struct gcry_md_context
) );
433 set_lasterr( GCRYERR_NO_MEM
);
437 bhd
->ctx
= b
= (struct gcry_md_context
*)( (char*)bhd
+ n
);
438 /* no need to copy the buffer due to the write above */
439 assert( ahd
->bufsize
== (n
- sizeof( struct gcry_md_handle
) + 1) );
440 bhd
->bufsize
= ahd
->bufsize
;
441 bhd
->bufpos
= 0; assert( !ahd
->bufpos
);
442 memcpy( b
, a
, sizeof *a
);
446 b
->macpads
= gcry_malloc_secure( 128 );
447 memcpy( b
->macpads
, a
->macpads
, 128 );
449 /* and now copy the complete list of algorithms */
450 /* I know that the copied list is reversed, but that doesn't matter */
451 for( ar
=a
->list
; ar
; ar
= ar
->next
) {
452 br
= a
->secure
? gcry_xmalloc_secure( sizeof *br
+ ar
->contextsize
453 - sizeof(ar
->context
) )
454 : gcry_xmalloc( sizeof *br
+ ar
->contextsize
455 - sizeof(ar
->context
) );
456 memcpy( br
, ar
, sizeof(*br
) + ar
->contextsize
457 - sizeof(ar
->context
) );
463 md_start_debug( bhd
, "unknown" );
468 gcry_md_copy( GCRY_MD_HD hd
)
470 return md_copy( hd
);
474 * Reset all contexts and discard any buffered stuff. This may be used
475 * instead of a md_close(); md_open().
478 gcry_md_reset( GCRY_MD_HD a
)
480 struct md_digest_list_s
*r
;
482 a
->bufpos
= a
->ctx
->finalized
= 0;
483 for( r
=a
->ctx
->list
; r
; r
= r
->next
) {
484 memset( r
->context
.c
, 0, r
->contextsize
);
485 (*r
->init
)( &r
->context
.c
);
487 if( a
->ctx
->macpads
) {
488 md_write( a
, a
->ctx
->macpads
, 64 ); /* inner pad */
494 md_close(GCRY_MD_HD a
)
496 struct md_digest_list_s
*r
, *r2
;
502 for(r
=a
->ctx
->list
; r
; r
= r2
) {
506 gcry_free(a
->ctx
->macpads
);
512 gcry_md_close( GCRY_MD_HD hd
)
519 md_write( GCRY_MD_HD a
, byte
*inbuf
, size_t inlen
)
521 struct md_digest_list_s
*r
;
523 if( a
->ctx
->debug
) {
524 if( a
->bufpos
&& fwrite(a
->buf
, a
->bufpos
, 1, a
->ctx
->debug
) != 1 )
526 if( inlen
&& fwrite(inbuf
, inlen
, 1, a
->ctx
->debug
) != 1 )
529 for(r
=a
->ctx
->list
; r
; r
= r
->next
) {
531 (*r
->write
)( &r
->context
.c
, a
->buf
, a
->bufpos
);
532 (*r
->write
)( &r
->context
.c
, inbuf
, inlen
);
539 gcry_md_write( GCRY_MD_HD hd
, const void *inbuf
, size_t inlen
)
541 md_write( hd
, (unsigned char *)inbuf
, inlen
);
547 md_final(GCRY_MD_HD a
)
549 struct md_digest_list_s
*r
;
551 if( a
->ctx
->finalized
)
555 md_write( a
, NULL
, 0 );
557 for(r
=a
->ctx
->list
; r
; r
= r
->next
) {
558 (*r
->final
)( &r
->context
.c
);
560 a
->ctx
->finalized
= 1;
561 if( a
->ctx
->macpads
) { /* finish the hmac */
562 int algo
= md_get_algo( a
);
563 byte
*p
= md_read( a
, algo
);
564 size_t dlen
= md_digest_length(algo
);
566 GCRY_MD_HD om
= md_open( algo
, a
->ctx
->secure
, 0 );
568 _gcry_fatal_error( gcry_errno(), NULL
);
569 md_write( om
, a
->ctx
->macpads
+64, 64 );
570 md_write( om
, p
, dlen
);
572 /* replace our digest with the mac (they have the same size) */
573 memcpy( p
, md_read( om
, algo
), dlen
);
581 prepare_macpads( GCRY_MD_HD hd
, const byte
*key
, size_t keylen
)
584 int algo
= md_get_algo( hd
);
585 byte
*helpkey
= NULL
;
589 return GCRYERR_INV_MD_ALGO
; /* i.e. no algo enabled */
592 helpkey
= gcry_malloc_secure( md_digest_length( algo
) );
594 return GCRYERR_NO_MEM
;
595 gcry_md_hash_buffer( algo
, helpkey
, key
, keylen
);
597 keylen
= md_digest_length( algo
);
598 assert( keylen
<= 64 );
601 memset( hd
->ctx
->macpads
, 0, 128 );
602 ipad
= hd
->ctx
->macpads
;
603 opad
= hd
->ctx
->macpads
+64;
604 memcpy( ipad
, key
, keylen
);
605 memcpy( opad
, key
, keylen
);
606 for(i
=0; i
< 64; i
++ ) {
610 gcry_free( helpkey
);
615 gcry_md_ctl( GCRY_MD_HD hd
, int cmd
, byte
*buffer
, size_t buflen
)
621 case GCRYCTL_FINALIZE
:
624 case GCRYCTL_SET_KEY
:
625 rc
= gcry_md_setkey (hd
, buffer
, buflen
);
627 case GCRYCTL_START_DUMP
:
628 md_start_debug (hd
, buffer
);
630 case GCRYCTL_STOP_DUMP
:
636 return set_lasterr( rc
);
640 gcry_md_setkey( GCRY_MD_HD hd
, const void *key
, size_t keylen
)
644 if( !(hd
->ctx
->macpads
) )
645 rc
= GCRYERR_CONFLICT
;
646 else if ( !(rc
= prepare_macpads( hd
, key
, keylen
)) )
654 * if ALGO is null get the digest for the used algo (which should be only one)
657 md_read( GCRY_MD_HD a
, int algo
)
659 struct md_digest_list_s
*r
;
661 if( !algo
) { /* return the first algorithm */
662 if( (r
=a
->ctx
->list
) ) {
664 log_debug("more than algorithm in md_read(0)\n");
665 return (*r
->read
)( &r
->context
.c
);
669 for(r
=a
->ctx
->list
; r
; r
= r
->next
)
670 if( r
->algo
== algo
)
671 return (*r
->read
)( &r
->context
.c
);
678 * Read out the complete digest, this function implictly finalizes
682 gcry_md_read( GCRY_MD_HD hd
, int algo
)
684 gcry_md_ctl( hd
, GCRYCTL_FINALIZE
, NULL
, 0 );
685 return md_read( hd
, algo
);
690 * This function combines md_final and md_read but keeps the context
691 * intact. This function can be used to calculate intermediate
692 * digests. The digest is copied into buffer and the digestlength is
693 * returned. If buffer is NULL only the needed size for buffer is returned.
694 * buflen gives the max size of buffer. If the buffer is too shourt to
695 * hold the complete digest, the buffer is filled with as many bytes are
696 * possible and this value is returned.
700 md_digest( GCRY_MD_HD a
, int algo
, byte
*buffer
, int buflen
)
702 struct md_digest_list_s
*r
= NULL
;
707 md_write( a
, NULL
, 0 );
709 if( !algo
) { /* return digest for the first algorithm */
710 if( (r
=a
->ctx
->list
) && r
->next
)
711 log_debug("more than algorithm in md_digest(0)\n");
714 for(r
=a
->ctx
->list
; r
; r
= r
->next
)
715 if( r
->algo
== algo
)
724 /* I don't want to change the interface, so I simply work on a copy
725 * of the context (extra overhead - should be fixed)*/
726 context
= a
->ctx
->secure
? gcry_xmalloc_secure( r
->contextsize
)
727 : gcry_xmalloc( r
->contextsize
);
728 memcpy( context
, r
->context
.c
, r
->contextsize
);
729 (*r
->final
)( context
);
730 digest
= (*r
->read
)( context
);
732 if( buflen
> r
->mdlen
)
734 memcpy( buffer
, digest
, buflen
);
742 * Read out an intermediate digest.
745 gcry_md_get( GCRY_MD_HD hd
, int algo
, byte
*buffer
, int buflen
)
748 return GCRYERR_INTERNAL
;
753 * Shortcut function to hash a buffer with a given algo. The only supported
754 * algorithm is RIPE-MD. The supplied digest buffer must be large enough
755 * to store the resulting hash. No error is returned, the function will
756 * abort on an invalid algo. DISABLED_ALGOS are ignored here.
759 gcry_md_hash_buffer( int algo
, void *digest
, const void *buffer
, size_t length
)
761 if( algo
== GCRY_MD_RMD160
)
762 _gcry_rmd160_hash_buffer( digest
, buffer
, length
);
763 else { /* for the others we do not have a fast function, so
764 * we use the normal functions to do it */
765 GCRY_MD_HD h
= md_open( algo
, 0, 0 );
767 BUG(); /* algo not available */
768 md_write( h
, (byte
*)buffer
, length
);
770 memcpy( digest
, md_read( h
, algo
), md_digest_length( algo
) );
776 md_get_algo( GCRY_MD_HD a
)
778 struct md_digest_list_s
*r
;
780 if( (r
=a
->ctx
->list
) ) {
782 log_error("WARNING: more than algorithm in md_get_algo()\n");
790 gcry_md_get_algo (GCRY_MD_HD hd
)
792 int algo
= md_get_algo (hd
);
795 set_lasterr (GCRYERR_GENERAL
);
803 * Return the length of the digest
806 md_digest_length( int algo
)
808 struct md_digest_list_s
*r
;
811 for(r
= digest_list
; r
; r
= r
->next
) {
812 if( r
->algo
== algo
)
815 } while( !r
&& load_digest_module( algo
) );
820 * Return the length of the digest in bytes.
821 * This function will return 0 in case of errors.
824 gcry_md_get_algo_dlen( int algo
)
826 /* we cheat a little bit */
830 case GCRY_MD_MD5
: return 16;
832 case GCRY_MD_RMD160
: return 20;
833 case GCRY_MD_SHA256
: return 32;
834 case GCRY_MD_SHA384
: return 48;
835 case GCRY_MD_SHA512
: return 64;
837 case GCRY_MD_CRC32_RFC1510
: return 4;
838 case GCRY_MD_CRC24_RFC2440
: return 3;
841 int len
= md_digest_length( algo
);
843 set_lasterr( GCRYERR_INV_MD_ALGO
);
850 /* Hmmm: add a mode to enumerate the OIDs
851 * to make g10/sig-check.c more portable */
853 md_asn_oid( int algo
, size_t *asnlen
, size_t *mdlen
)
855 struct md_digest_list_s
*r
;
858 for(r
= digest_list
; r
; r
= r
->next
) {
859 if( r
->algo
== algo
) {
867 } while( !r
&& load_digest_module( algo
) );
868 log_bug("no asn for md algo %d\n", algo
);
875 * Return information about the given cipher algorithm
876 * WHAT select the kind of information returned:
878 * Returns 0 when the specified algorithm is available for use.
879 * buffer and nbytes must be zero.
880 * GCRYCTL_GET_ASNOID:
881 * Return the ASNOID of the algorithm in buffer. if buffer is NULL, only
882 * the required length is returned.
884 * On error the value -1 is returned and the error reason may be
885 * retrieved by gcry_errno().
886 * Note: Because this function is in most cases used to return an
887 * integer value, we can make it easier for the caller to just look at
888 * the return value. The caller will in all cases consult the value
889 * and thereby detecting whether a error occured or not (i.e. while checking
893 gcry_md_algo_info( int algo
, int what
, void *buffer
, size_t *nbytes
)
896 case GCRYCTL_TEST_ALGO
:
897 if( buffer
|| nbytes
) {
898 set_lasterr( GCRYERR_INV_ARG
);
901 if( check_digest_algo( algo
) ) {
902 set_lasterr( GCRYERR_INV_MD_ALGO
);
907 case GCRYCTL_GET_ASNOID
: {
909 const char *asn
= md_asn_oid( algo
, &asnlen
, NULL
);
910 if( buffer
&& *nbytes
>= asnlen
) {
911 memcpy( buffer
, asn
, asnlen
);
915 if( !buffer
&& nbytes
) {
919 set_lasterr( buffer
? GCRYERR_TOO_SHORT
: GCRYERR_INV_ARG
);
925 set_lasterr( GCRYERR_INV_OP
);
935 md_start_debug( GCRY_MD_HD md
, const char *suffix
)
940 if( md
->ctx
->debug
) {
941 log_debug("Oops: md debug already started\n");
945 sprintf(buf
, "dbgmd-%05d.%.10s", idx
, suffix
);
946 md
->ctx
->debug
= fopen(buf
, "w");
947 if( !md
->ctx
->debug
)
948 log_debug("md debug: can't open %s\n", buf
);
952 md_stop_debug( GCRY_MD_HD md
)
954 if( md
->ctx
->debug
) {
956 md_write( md
, NULL
, 0 );
957 fclose(md
->ctx
->debug
);
958 md
->ctx
->debug
= NULL
;
960 #ifdef HAVE_U64_TYPEDEF
961 { /* a kludge to pull in the __muldi3 for Solaris */
962 volatile u32 a
= (u32
)(ulong
)md
;
973 * Return information about the digest handle.
975 * Returns 1 when the handle works on secured memory
976 * otherwise 0 is returned. There is no error return.
977 * GCRYCTL_IS_ALGO_ENABLED:
978 * Returns 1 if the algo is enanled for that handle.
979 * The algo must be passed as the address of an int.
982 gcry_md_info( GCRY_MD_HD h
, int cmd
, void *buffer
, size_t *nbytes
)
986 case GCRYCTL_IS_SECURE
:
987 return h
->ctx
->secure
;
989 case GCRYCTL_IS_ALGO_ENABLED
:
992 struct md_digest_list_s
*r
;
994 if (!buffer
|| (nbytes
&& *nbytes
!= sizeof (int))) {
995 set_lasterr (GCRYERR_INV_ARG
);
998 algo
= *(int*)buffer
;
999 for(r
=h
->ctx
->list
; r
; r
= r
->next
) {
1000 if( r
->algo
== algo
)
1007 set_lasterr( GCRYERR_INV_OP
);