Allow KDC to always return the salt in the PA-ETYPE-INFO[2]
[heimdal.git] / lib / krb5 / crypto-evp.c
blob a16b83cb0e070f37deb43634daf6bdf8e31f6927
/*
 * Copyright (c) 1997 - 2008 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "krb5_locl.h"

void
_krb5_evp_schedule(krb5_context context,
                   struct _krb5_key_type *kt,
                   struct _krb5_key_data *kd)
{
    struct _krb5_evp_schedule *key = kd->schedule->data;
    const EVP_CIPHER *c = (*kt->evp)();

    EVP_CIPHER_CTX_init(&key->ectx);
    EVP_CIPHER_CTX_init(&key->dctx);

    EVP_CipherInit_ex(&key->ectx, c, NULL, kd->key->keyvalue.data, NULL, 1);
    EVP_CipherInit_ex(&key->dctx, c, NULL, kd->key->keyvalue.data, NULL, 0);
}

void
_krb5_evp_cleanup(krb5_context context, struct _krb5_key_data *kd)
{
    struct _krb5_evp_schedule *key = kd->schedule->data;

    EVP_CIPHER_CTX_cleanup(&key->ectx);
    EVP_CIPHER_CTX_cleanup(&key->dctx);
}

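/*
 * Usage sketch for the two helpers above (illustrative only, hence the
 * #if 0; the function name and the assumption that kd->schedule already
 * holds room for a struct _krb5_evp_schedule are ours, not part of this
 * file's contract):
 */
#if 0
static void
example_schedule_lifecycle(krb5_context context,
                           struct _krb5_key_type *kt,
                           struct _krb5_key_data *kd)
{
    unsigned char buf[16] = { 0 };

    _krb5_evp_schedule(context, kt, kd);    /* init ectx/dctx from kd->key */
    _krb5_evp_encrypt(context, kd, buf, sizeof(buf),
                      1, 0, NULL);          /* one CBC block, zero IV */
    _krb5_evp_cleanup(context, kd);         /* release the EVP contexts */
}
#endif
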
int
_krb5_evp_digest_iov(krb5_crypto crypto,
                     const struct krb5_crypto_iov *iov,
                     int niov,
                     void *hash,
                     unsigned int *hsize,
                     const EVP_MD *md,
                     ENGINE *engine)
{
    EVP_MD_CTX *ctx;
    int ret, i;
    krb5_data current = {0, 0};

    if (crypto != NULL) {
        if (crypto->mdctx == NULL)
            crypto->mdctx = EVP_MD_CTX_create();
        if (crypto->mdctx == NULL)
            return 0;
        ctx = crypto->mdctx;
    } else
        ctx = EVP_MD_CTX_create();
    if (ctx == NULL)
        return 0;

    ret = EVP_DigestInit_ex(ctx, md, engine);
    if (ret != 1)
        goto out;

    for (i = 0; i < niov; i++) {
        if (_krb5_crypto_iov_should_sign(&iov[i])) {
            /* Coalesce adjacent iovecs that alias one contiguous buffer
             * into a single EVP_DigestUpdate() call */
            if ((char *)current.data + current.length == iov[i].data.data) {
                current.length += iov[i].data.length;
            } else {
                if (current.data) {
                    ret = EVP_DigestUpdate(ctx, current.data, current.length);
                    if (ret != 1)
                        goto out;
                }
                current = iov[i].data;
            }
        }
    }

    if (current.data) {
        ret = EVP_DigestUpdate(ctx, current.data, current.length);
        if (ret != 1)
            goto out;
    }

    ret = EVP_DigestFinal_ex(ctx, hash, hsize);

out:
    if (crypto == NULL)
        EVP_MD_CTX_destroy(ctx);

    return ret;
}

krb5_error_code
_krb5_evp_hmac_iov(krb5_context context,
                   krb5_crypto crypto,
                   struct _krb5_key_data *key,
                   const struct krb5_crypto_iov *iov,
                   int niov,
                   void *hmac,
                   unsigned int *hmaclen,
                   const EVP_MD *md,
                   ENGINE *engine)
{
    HMAC_CTX *ctx;
    krb5_data current = {0, 0};
    int i;

    if (crypto != NULL) {
        if (crypto->hmacctx == NULL)
            crypto->hmacctx = HMAC_CTX_new();
        ctx = crypto->hmacctx;
    } else {
        ctx = HMAC_CTX_new();
    }
    if (ctx == NULL)
        return krb5_enomem(context);

    HMAC_Init_ex(ctx, key->key->keyvalue.data, key->key->keyvalue.length,
                 md, engine);

    for (i = 0; i < niov; i++) {
        if (_krb5_crypto_iov_should_sign(&iov[i])) {
            if ((char *)current.data + current.length == iov[i].data.data) {
                current.length += iov[i].data.length;
            } else {
                if (current.data)
                    HMAC_Update(ctx, current.data, current.length);
                current = iov[i].data;
            }
        }
    }

    if (current.data)
        HMAC_Update(ctx, current.data, current.length);

    HMAC_Final(ctx, hmac, hmaclen);

    if (crypto == NULL)
        HMAC_CTX_free(ctx);

    return 0;
}

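/*
 * Call sketch (our own example, disabled with #if 0): the HMAC covers
 * every element for which _krb5_crypto_iov_should_sign() returns true,
 * DATA and SIGN_ONLY among them, so a protocol header can be bound to
 * the payload without being encrypted.
 */
#if 0
static void
example_hmac_iov(krb5_context context, krb5_crypto crypto,
                 struct _krb5_key_data *key)
{
    unsigned char hdr[8] = { 0 }, payload[32] = { 0 };
    unsigned char mac[EVP_MAX_MD_SIZE];
    unsigned int maclen;
    struct krb5_crypto_iov iov[2];

    iov[0].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;  /* header: signed only */
    iov[0].data.data = hdr;
    iov[0].data.length = sizeof(hdr);
    iov[1].flags = KRB5_CRYPTO_TYPE_DATA;       /* payload: signed too */
    iov[1].data.data = payload;
    iov[1].data.length = sizeof(payload);

    _krb5_evp_hmac_iov(context, crypto, key, iov, 2, mac, &maclen,
                       EVP_sha1(), NULL);
}
#endif
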
krb5_error_code
_krb5_evp_encrypt(krb5_context context,
                  struct _krb5_key_data *key,
                  void *data,
                  size_t len,
                  krb5_boolean encryptp,
                  int usage,
                  void *ivec)
{
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    EVP_CIPHER_CTX *c;
    c = encryptp ? &ctx->ectx : &ctx->dctx;
    if (ivec == NULL) {
        /* alloca ? */
        size_t len2 = EVP_CIPHER_CTX_iv_length(c);
        void *loiv = malloc(len2);
        if (loiv == NULL)
            return krb5_enomem(context);
        memset(loiv, 0, len2);
        EVP_CipherInit_ex(c, NULL, NULL, NULL, loiv, -1);
        free(loiv);
    } else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    EVP_Cipher(c, data, data, len);
    return 0;
}

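/*
 * IV-handling sketch for _krb5_evp_encrypt (our example, #if 0'd out):
 * ivec == NULL means "use a fresh all-zero IV for this call".  The
 * caller's ivec buffer is only read, never written back, so chaining
 * two calls requires the caller to refresh ivec itself.
 */
#if 0
static void
example_iv_contract(krb5_context context, struct _krb5_key_data *kd)
{
    unsigned char iv[EVP_MAX_BLOCK_LENGTH] = { 0 };
    unsigned char a[32] = { 0 }, b[32] = { 0 };

    /* CBC over both blocks of 'a' with a zero IV */
    _krb5_evp_encrypt(context, kd, a, sizeof(a), 1, 0, NULL);

    /* Splitting 'b' into two calls that reuse the same iv restarts the
     * chain at block 2, so b != a afterwards even though the plaintext
     * and key were identical */
    _krb5_evp_encrypt(context, kd, b, 16, 1, 0, iv);
    _krb5_evp_encrypt(context, kd, b + 16, 16, 1, 0, iv);
}
#endif
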
struct _krb5_evp_iov_cursor {
    struct krb5_crypto_iov *iov;
    int niov;
    krb5_data current;
    int nextidx;
};

static const unsigned char zero_ivec[EVP_MAX_BLOCK_LENGTH] = { 0 };

static inline int
_krb5_evp_iov_should_encrypt(struct krb5_crypto_iov *iov)
{
    return (iov->flags == KRB5_CRYPTO_TYPE_DATA
            || iov->flags == KRB5_CRYPTO_TYPE_HEADER
            || iov->flags == KRB5_CRYPTO_TYPE_PADDING);
}

/*
 * If we have a group of iovecs which have been split up from
 * a single common buffer, expand the 'current' iovec out to
 * be as large as possible.
 */
static inline void
_krb5_evp_iov_cursor_expand(struct _krb5_evp_iov_cursor *cursor)
{
    if (cursor->nextidx == cursor->niov)
        return;

    /* Stay within the iovec array while coalescing adjacent buffers */
    while (cursor->nextidx < cursor->niov &&
           _krb5_evp_iov_should_encrypt(&cursor->iov[cursor->nextidx])) {
        if (cursor->iov[cursor->nextidx].data.length != 0 &&
            ((char *)cursor->current.data + cursor->current.length
             != cursor->iov[cursor->nextidx].data.data)) {
            return;
        }
        cursor->current.length += cursor->iov[cursor->nextidx].data.length;
        cursor->nextidx++;
    }

    return;
}

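/*
 * Coalescing example (our sketch, #if 0'd out): three iovecs that alias
 * consecutive slices of one buffer collapse into a single 48-byte
 * 'current' run as soon as the cursor is initialized.
 */
#if 0
static void
example_cursor_coalescing(void)
{
    unsigned char buf[48];
    struct krb5_crypto_iov iov[3];
    struct _krb5_evp_iov_cursor cursor;

    iov[0].flags = KRB5_CRYPTO_TYPE_HEADER;
    iov[0].data.data = buf;
    iov[0].data.length = 16;
    iov[1].flags = KRB5_CRYPTO_TYPE_DATA;
    iov[1].data.data = buf + 16;
    iov[1].data.length = 16;
    iov[2].flags = KRB5_CRYPTO_TYPE_PADDING;
    iov[2].data.data = buf + 32;
    iov[2].data.length = 16;

    _krb5_evp_iov_cursor_init(&cursor, iov, 3);
    /* now cursor.current.data == buf and cursor.current.length == 48 */
}
#endif
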
/* Move the cursor along to the start of the next block to be
 * encrypted */
static inline void
_krb5_evp_iov_cursor_nextcrypt(struct _krb5_evp_iov_cursor *cursor)
{
    for (; cursor->nextidx < cursor->niov; cursor->nextidx++) {
        if (_krb5_evp_iov_should_encrypt(&cursor->iov[cursor->nextidx])
            && cursor->iov[cursor->nextidx].data.length != 0) {
            cursor->current = cursor->iov[cursor->nextidx].data;
            cursor->nextidx++;
            _krb5_evp_iov_cursor_expand(cursor);
            return;
        }
    }

    cursor->current.length = 0; /* No matches, so we're done here */
}

static inline void
_krb5_evp_iov_cursor_init(struct _krb5_evp_iov_cursor *cursor,
                          struct krb5_crypto_iov *iov, int niov)
{
    memset(cursor, 0, sizeof(struct _krb5_evp_iov_cursor));

    cursor->iov = iov;
    cursor->niov = niov;
    cursor->nextidx = 0;

    /* Move along to the first block we're going to be encrypting */
    _krb5_evp_iov_cursor_nextcrypt(cursor);
}

static inline void
_krb5_evp_iov_cursor_advance(struct _krb5_evp_iov_cursor *cursor,
                             size_t amount)
{
    while (amount > 0) {
        if (cursor->current.length > amount) {
            cursor->current.data = (char *)cursor->current.data + amount;
            cursor->current.length -= amount;
            return;
        }
        amount -= cursor->current.length;
        _krb5_evp_iov_cursor_nextcrypt(cursor);
    }
}

static inline int
_krb5_evp_iov_cursor_done(struct _krb5_evp_iov_cursor *cursor)
{
    return (cursor->nextidx == cursor->niov && cursor->current.length == 0);
}

/* Fill a memory buffer with data from one or more iovecs. Doesn't
 * advance the passed in cursor - use outcursor for the position
 * at the end
 */
static inline void
_krb5_evp_iov_cursor_fillbuf(struct _krb5_evp_iov_cursor *cursor,
                             unsigned char *buf, size_t length,
                             struct _krb5_evp_iov_cursor *outcursor)
{
    struct _krb5_evp_iov_cursor cursorint;

    cursorint = *cursor;

    while (length > 0 && !_krb5_evp_iov_cursor_done(&cursorint)) {
        if (cursorint.current.length > length) {
            memcpy(buf, cursorint.current.data, length);
            _krb5_evp_iov_cursor_advance(&cursorint, length);
            length = 0;
        } else {
            memcpy(buf, cursorint.current.data, cursorint.current.length);
            length -= cursorint.current.length;
            buf += cursorint.current.length;
            _krb5_evp_iov_cursor_nextcrypt(&cursorint);
        }
    }

    if (outcursor != NULL)
        *outcursor = cursorint;
}

/* Fill an iovec from a memory buffer. Always advances the cursor to
 * the end of the filled region
 */
static inline void
_krb5_evp_iov_cursor_fillvec(struct _krb5_evp_iov_cursor *cursor,
                             unsigned char *buf, size_t length)
{
    while (length > 0 && !_krb5_evp_iov_cursor_done(cursor)) {
        if (cursor->current.length > length) {
            memcpy(cursor->current.data, buf, length);
            _krb5_evp_iov_cursor_advance(cursor, length);
            length = 0;
        } else {
            memcpy(cursor->current.data, buf, cursor->current.length);
            length -= cursor->current.length;
            buf += cursor->current.length;
            _krb5_evp_iov_cursor_nextcrypt(cursor);
        }
    }
}

static size_t
_krb5_evp_iov_cryptlength(struct krb5_crypto_iov *iov, int niov)
{
    int i;
    size_t length = 0;

    for (i = 0; i < niov; i++) {
        if (_krb5_evp_iov_should_encrypt(&iov[i]))
            length += iov[i].data.length;
    }

    return length;
}

krb5_error_code
_krb5_evp_encrypt_iov(krb5_context context,
                      struct _krb5_key_data *key,
                      struct krb5_crypto_iov *iov,
                      int niov,
                      krb5_boolean encryptp,
                      int usage,
                      void *ivec)
{
    size_t blocksize, blockmask, wholeblocks;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    struct _krb5_evp_iov_cursor cursor;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);

    /* blocksize is assumed to be a power of two */
    blockmask = ~(blocksize - 1);

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    _krb5_evp_iov_cursor_init(&cursor, iov, niov);

    while (!_krb5_evp_iov_cursor_done(&cursor)) {

        /* Number of bytes of data in this iovec that are in whole blocks */
        wholeblocks = cursor.current.length & blockmask;

        if (wholeblocks != 0) {
            EVP_Cipher(c, cursor.current.data,
                       cursor.current.data, wholeblocks);
            _krb5_evp_iov_cursor_advance(&cursor, wholeblocks);
        }

        /* If there's a partial block of data remaining in the current
         * iovec, steal enough from subsequent iovecs to form a whole block */
        if (cursor.current.length > 0 && cursor.current.length < blocksize) {
            /* Build up a block's worth of data in tmp, leaving the cursor
             * pointing at where we started */
            _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, blocksize, NULL);

            EVP_Cipher(c, tmp, tmp, blocksize);

            /* Copy the data in tmp back into the iovecs that it came from,
             * advancing the cursor */
            _krb5_evp_iov_cursor_fillvec(&cursor, tmp, blocksize);
        }
    }

    return 0;
}

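/*
 * Call sketch (hypothetical layout, #if 0'd out; a real caller sizes the
 * header and padding from the encryption type): HEADER, DATA and PADDING
 * are encrypted in place, and because the three buffers here are not
 * contiguous, the 25-byte payload ends mid-block and the loop above
 * steals the 7 pad bytes to complete the final block.
 */
#if 0
static void
example_encrypt_iov(krb5_context context, struct _krb5_key_data *kd)
{
    unsigned char hdr[16] = { 0 }, payload[25] = { 0 }, pad[7] = { 0 };
    struct krb5_crypto_iov iov[3];

    iov[0].flags = KRB5_CRYPTO_TYPE_HEADER;
    iov[0].data.data = hdr;
    iov[0].data.length = sizeof(hdr);
    iov[1].flags = KRB5_CRYPTO_TYPE_DATA;
    iov[1].data.data = payload;
    iov[1].data.length = sizeof(payload);
    iov[2].flags = KRB5_CRYPTO_TYPE_PADDING;
    iov[2].data.data = pad;
    iov[2].data.length = sizeof(pad);

    /* 16 + 25 + 7 = 48 bytes, i.e. three whole 16-byte blocks */
    _krb5_evp_encrypt_iov(context, kd, iov, 3, 1, 0, NULL);
}
#endif
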
krb5_error_code
_krb5_evp_encrypt_iov_cts(krb5_context context,
                          struct _krb5_key_data *key,
                          struct krb5_crypto_iov *iov,
                          int niov,
                          krb5_boolean encryptp,
                          int usage,
                          void *ivec)
{
    size_t blocksize, blockmask, wholeblocks, length;
    size_t remaining, partiallen;
    struct _krb5_evp_iov_cursor cursor, lastpos;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH], tmp2[EVP_MAX_BLOCK_LENGTH];
    unsigned char tmp3[EVP_MAX_BLOCK_LENGTH], ivec2[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    int i;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);
    blockmask = ~(blocksize - 1);

    length = _krb5_evp_iov_cryptlength(iov, niov);

    if (length < blocksize) {
        krb5_set_error_message(context, EINVAL,
                               "message block too short");
        return EINVAL;
    }

    if (length == blocksize)
        return _krb5_evp_encrypt_iov(context, key, iov, niov,
                                     encryptp, usage, ivec);

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    if (encryptp) {
        /* On our first pass, we want to process everything but the
         * final partial block */
        remaining = ((length - 1) & blockmask);
        partiallen = length - remaining;

        memset(&lastpos, 0, sizeof(lastpos)); /* Keep the compiler happy */
    } else {
        /* Decryption needs to leave 2 whole blocks and a partial for
         * further processing */
        if (length > 2 * blocksize) {
            remaining = (((length - 1) / blocksize) * blocksize) - (blocksize * 2);
            partiallen = length - remaining - (blocksize * 2);
        } else {
            remaining = 0;
            partiallen = length - blocksize;
        }
    }

    _krb5_evp_iov_cursor_init(&cursor, iov, niov);
    while (remaining > 0) {
        /* If the iovec has more data than we need, just use it */
        if (cursor.current.length >= remaining) {
            EVP_Cipher(c, cursor.current.data, cursor.current.data, remaining);

            if (encryptp) {
                /* We've just encrypted the last block of data. Make a copy
                 * of it (and its location) for the CTS dance, below */
                lastpos = cursor;
                _krb5_evp_iov_cursor_advance(&lastpos, remaining - blocksize);
                memcpy(ivec2, lastpos.current.data, blocksize);
            }

            _krb5_evp_iov_cursor_advance(&cursor, remaining);
            remaining = 0;
        } else {
            /* Use as much as we can, firstly all of the whole blocks */
            wholeblocks = cursor.current.length & blockmask;

            if (wholeblocks > 0) {
                EVP_Cipher(c, cursor.current.data, cursor.current.data,
                           wholeblocks);
                _krb5_evp_iov_cursor_advance(&cursor, wholeblocks);
                remaining -= wholeblocks;
            }

            /* Then, if we have partial data left, steal enough from subsequent
             * iovecs to make a whole block */
            if (cursor.current.length > 0 && cursor.current.length < blocksize) {
                if (encryptp && remaining == blocksize)
                    lastpos = cursor;

                _krb5_evp_iov_cursor_fillbuf(&cursor, ivec2, blocksize, NULL);
                EVP_Cipher(c, ivec2, ivec2, blocksize);
                _krb5_evp_iov_cursor_fillvec(&cursor, ivec2, blocksize);

                remaining -= blocksize;
            }
        }
    }

    /* Encryption */
    if (encryptp) {
        /* Copy the partial block into tmp */
        _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, partiallen, NULL);

        /* XOR the final partial block with ivec2 */
        for (i = 0; i < partiallen; i++)
            tmp[i] = tmp[i] ^ ivec2[i];
        for (; i < blocksize; i++)
            tmp[i] = 0 ^ ivec2[i]; /* XOR 0s if partial block exhausted */

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, tmp, tmp, blocksize);

        _krb5_evp_iov_cursor_fillvec(&lastpos, tmp, blocksize);
        _krb5_evp_iov_cursor_fillvec(&cursor, ivec2, partiallen);

        if (ivec)
            memcpy(ivec, tmp, blocksize);

        return 0;
    }

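    /*
     * To make the swap above concrete (our illustration): with a
     * 16-byte blocksize and length 40, the first pass CBC-encrypts
     * blocks 1 and 2, and lastpos remembers where C2 landed.  The
     * 8-byte plaintext tail is XORed with C2 (saved in ivec2) and
     * encrypted to give C3, which is written back at C2's position,
     * while the first 8 bytes of C2 are written where the tail was.
     * The ciphertext therefore ends up ordered C1 | C3 | C2[0..7].
     */
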
    /* Decryption */

    /* Make a copy of the 2nd last full ciphertext block in ivec2 before
     * decrypting it. If no such block exists, use ivec or zero_ivec */
    if (length <= blocksize * 2) {
        if (ivec)
            memcpy(ivec2, ivec, blocksize);
        else
            memcpy(ivec2, zero_ivec, blocksize);
    } else {
        _krb5_evp_iov_cursor_fillbuf(&cursor, ivec2, blocksize, NULL);
        EVP_Cipher(c, tmp, ivec2, blocksize);
        _krb5_evp_iov_cursor_fillvec(&cursor, tmp, blocksize);
    }

    lastpos = cursor; /* Remember where the last block is */
    _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, blocksize, &cursor);
    EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
    EVP_Cipher(c, tmp2, tmp, blocksize); /* tmp eventually becomes output ivec */

    _krb5_evp_iov_cursor_fillbuf(&cursor, tmp3, partiallen, NULL);

    memcpy(tmp3 + partiallen, tmp2 + partiallen, blocksize - partiallen); /* xor 0 */
    for (i = 0; i < partiallen; i++)
        tmp2[i] = tmp2[i] ^ tmp3[i];

    _krb5_evp_iov_cursor_fillvec(&cursor, tmp2, partiallen);

    EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
    EVP_Cipher(c, tmp3, tmp3, blocksize);

    for (i = 0; i < blocksize; i++)
        tmp3[i] ^= ivec2[i];

    _krb5_evp_iov_cursor_fillvec(&lastpos, tmp3, blocksize);

    if (ivec)
        memcpy(ivec, tmp, blocksize);

    return 0;
}

krb5_error_code
_krb5_evp_encrypt_cts(krb5_context context,
                      struct _krb5_key_data *key,
                      void *data,
                      size_t len,
                      krb5_boolean encryptp,
                      int usage,
                      void *ivec)
{
    size_t i, blocksize;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH], ivec2[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    unsigned char *p;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);

    if (len < blocksize) {
        krb5_set_error_message(context, EINVAL,
                               "message block too short");
        return EINVAL;
    } else if (len == blocksize) {
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, data, data, len);
        return 0;
    }

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    if (encryptp) {

        p = data;
        i = ((len - 1) / blocksize) * blocksize;
        EVP_Cipher(c, p, p, i);
        p += i - blocksize;
        len -= i;
        memcpy(ivec2, p, blocksize);

        for (i = 0; i < len; i++)
            tmp[i] = p[i + blocksize] ^ ivec2[i];
        for (; i < blocksize; i++)
            tmp[i] = 0 ^ ivec2[i];

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, p, tmp, blocksize);

        memcpy(p + blocksize, ivec2, len);
        if (ivec)
            memcpy(ivec, p, blocksize);
    } else {
        unsigned char tmp2[EVP_MAX_BLOCK_LENGTH], tmp3[EVP_MAX_BLOCK_LENGTH];

        p = data;
        if (len > blocksize * 2) {
            /* remove last two blocks and round up, decrypt this with cbc,
             * then do the cts dance */
            i = ((((len - blocksize * 2) + blocksize - 1) / blocksize) * blocksize);
            memcpy(ivec2, p + i - blocksize, blocksize);
            EVP_Cipher(c, p, p, i);
            p += i;
            len -= i + blocksize;
        } else {
            if (ivec)
                memcpy(ivec2, ivec, blocksize);
            else
                memcpy(ivec2, zero_ivec, blocksize);
            len -= blocksize;
        }

        memcpy(tmp, p, blocksize);
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, tmp2, p, blocksize);

        memcpy(tmp3, p + blocksize, len);
        memcpy(tmp3 + len, tmp2 + len, blocksize - len); /* xor 0 */

        for (i = 0; i < len; i++)
            p[i + blocksize] = tmp2[i] ^ tmp3[i];

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, p, tmp3, blocksize);

        for (i = 0; i < blocksize; i++)
            p[i] ^= ivec2[i];

        if (ivec)
            memcpy(ivec, tmp, blocksize);
    }

    return 0;
}
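
/*
 * Worked example for the flat-buffer case above (our numbers; the layout
 * matches the ciphertext stealing used by the Kerberos AES enctypes,
 * RFC 3962): with blocksize = 16 and len = 40 the plaintext is
 * P1 | P2 | P3, P3 being 8 bytes.  Encryption CBC-processes P1 and P2
 * into C1 and C2, then emits
 *
 *     C1 | E(C2 ^ pad(P3)) | C2[0..7]
 *
 * so the last two blocks swap places and the trailing block is truncated
 * to the tail's 8 bytes; the decryption branch reverses this dance.
 */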