/*
 * Copyright (c) 1997 - 2008 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "krb5_locl.h"
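
/*
 * Glue between the libkrb5 crypto framework and OpenSSL's EVP
 * interface: key schedule setup/teardown, digest and HMAC helpers
 * that walk iovec arrays, and in-place bulk encryption including
 * ciphertext-stealing (CTS) variants.
 *
 * _krb5_evp_schedule initialises one encrypting and one decrypting
 * EVP_CIPHER_CTX from the raw key; _krb5_evp_cleanup releases them.
 */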
void
_krb5_evp_schedule(krb5_context context,
                   struct _krb5_key_type *kt,
                   struct _krb5_key_data *kd)
{
    struct _krb5_evp_schedule *key = kd->schedule->data;
    const EVP_CIPHER *c = (*kt->evp)();

    EVP_CIPHER_CTX_init(&key->ectx);
    EVP_CIPHER_CTX_init(&key->dctx);

    EVP_CipherInit_ex(&key->ectx, c, NULL, kd->key->keyvalue.data, NULL, 1);
    EVP_CipherInit_ex(&key->dctx, c, NULL, kd->key->keyvalue.data, NULL, 0);
}

void
_krb5_evp_cleanup(krb5_context context, struct _krb5_key_data *kd)
{
    struct _krb5_evp_schedule *key = kd->schedule->data;
    EVP_CIPHER_CTX_cleanup(&key->ectx);
    EVP_CIPHER_CTX_cleanup(&key->dctx);
}
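
/*
 * Hash the signable iovec elements with 'md'.  Elements that sit
 * back-to-back in memory are coalesced into a single
 * EVP_DigestUpdate() call.  Follows the EVP return convention:
 * 1 on success, 0 on failure.
 */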
int
_krb5_evp_digest_iov(krb5_crypto crypto,
                     const struct krb5_crypto_iov *iov,
                     int niov,
                     void *hash,
                     unsigned int *hsize,
                     const EVP_MD *md,
                     ENGINE *engine)
{
    EVP_MD_CTX *ctx;
    int ret, i;
    krb5_data current = {0,0};

    if (crypto != NULL) {
        if (crypto->mdctx == NULL)
            crypto->mdctx = EVP_MD_CTX_create();
        if (crypto->mdctx == NULL)
            return 0;
        ctx = crypto->mdctx;
    } else {
        ctx = EVP_MD_CTX_create();
        /* Handle allocation failure for the private context too */
        if (ctx == NULL)
            return 0;
    }

    ret = EVP_DigestInit_ex(ctx, md, engine);
    if (ret != 1)
        goto out;

    /* Minimize EVP calls by coalescing contiguous iovec elements */
    for (i = 0; i < niov; i++) {
        if (_krb5_crypto_iov_should_sign(&iov[i])) {
            if (current.data &&
                (char *)current.data + current.length == iov[i].data.data) {
                current.length += iov[i].data.length;
            } else {
                if (current.data) {
                    ret = EVP_DigestUpdate(ctx, current.data, current.length);
                    if (ret != 1)
                        goto out;
                }
                current = iov[i].data;
            }
        }
    }

    if (current.data) {
        ret = EVP_DigestUpdate(ctx, current.data, current.length);
        if (ret != 1)
            goto out;
    }

    ret = EVP_DigestFinal_ex(ctx, hash, hsize);

out:
    if (crypto == NULL)
        EVP_MD_CTX_destroy(ctx);

    return ret;
}
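
/*
 * HMAC counterpart of _krb5_evp_digest_iov: the same coalescing walk
 * over the signable iovec elements, keyed with 'key' and returning a
 * krb5_error_code rather than the EVP 0/1 convention.
 */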
krb5_error_code
_krb5_evp_hmac_iov(krb5_context context,
                   krb5_crypto crypto,
                   struct _krb5_key_data *key,
                   const struct krb5_crypto_iov *iov,
                   int niov,
                   void *hmac,
                   unsigned int *hmaclen,
                   const EVP_MD *md,
                   ENGINE *engine)
{
    HMAC_CTX *ctx;
    krb5_data current = {0, NULL};
    int i;

    if (crypto != NULL) {
        if (crypto->hmacctx == NULL)
            crypto->hmacctx = HMAC_CTX_new();
        ctx = crypto->hmacctx;
    } else {
        ctx = HMAC_CTX_new();
    }
    if (ctx == NULL)
        return krb5_enomem(context);

    if (HMAC_Init_ex(ctx, key->key->keyvalue.data, key->key->keyvalue.length,
                     md, engine) == 0) {
        /* Only free the context if we own it; a cached crypto->hmacctx
         * is released when the crypto handle is destroyed */
        if (crypto == NULL)
            HMAC_CTX_free(ctx);
        return krb5_enomem(context);
    }

    for (i = 0; i < niov; i++) {
        if (_krb5_crypto_iov_should_sign(&iov[i])) {
            if (current.data &&
                (char *)current.data + current.length == iov[i].data.data) {
                current.length += iov[i].data.length;
            } else {
                if (current.data)
                    HMAC_Update(ctx, current.data, current.length);
                current = iov[i].data;
            }
        }
    }

    if (current.data)
        HMAC_Update(ctx, current.data, current.length);

    HMAC_Final(ctx, hmac, hmaclen);

    if (crypto == NULL)
        HMAC_CTX_free(ctx);

    return 0;
}
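
/*
 * Encrypt or decrypt (per 'encryptp') a single contiguous buffer in
 * place.  A NULL 'ivec' is treated as an all-zero IV of the cipher's
 * IV length.
 */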
krb5_error_code
_krb5_evp_encrypt(krb5_context context,
                  struct _krb5_key_data *key,
                  void *data,
                  size_t len,
                  krb5_boolean encryptp,
                  int usage,
                  void *ivec)
{
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    EVP_CIPHER_CTX *c;
    c = encryptp ? &ctx->ectx : &ctx->dctx;
    if (ivec == NULL) {
        /* alloca ? */
        size_t len2 = EVP_CIPHER_CTX_iv_length(c);
        void *loiv = malloc(len2);
        if (loiv == NULL)
            return krb5_enomem(context);
        memset(loiv, 0, len2);
        EVP_CipherInit_ex(c, NULL, NULL, NULL, loiv, -1);
        free(loiv);
    } else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    EVP_Cipher(c, data, data, len);
    return 0;
}
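
/*
 * Cursor for walking the encryptable elements of an iovec array as
 * one logical byte stream: 'current' is the contiguous run being
 * consumed and 'nextidx' is the next element to examine.
 */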
struct _krb5_evp_iov_cursor
{
    struct krb5_crypto_iov *iov;
    int niov;
    krb5_data current;
    int nextidx;
};

static const unsigned char zero_ivec[EVP_MAX_BLOCK_LENGTH] = { 0 };

static inline int
_krb5_evp_iov_should_encrypt(struct krb5_crypto_iov *iov)
{
    return (iov->flags == KRB5_CRYPTO_TYPE_DATA
            || iov->flags == KRB5_CRYPTO_TYPE_HEADER
            || iov->flags == KRB5_CRYPTO_TYPE_PADDING);
}
/*
 * If we have a group of iovecs which have been split up from
 * a single common buffer, expand the 'current' iovec out to
 * be as large as possible.
 */
static inline void
_krb5_evp_iov_cursor_expand(struct _krb5_evp_iov_cursor *cursor)
{
    if (cursor->nextidx == cursor->niov)
        return;

    /* Recheck the bound each pass so we never read past the array */
    while (cursor->nextidx < cursor->niov &&
           _krb5_evp_iov_should_encrypt(&cursor->iov[cursor->nextidx])) {
        if (cursor->iov[cursor->nextidx].data.length != 0 &&
            ((char *)cursor->current.data + cursor->current.length
             != cursor->iov[cursor->nextidx].data.data)) {
            return;
        }
        cursor->current.length += cursor->iov[cursor->nextidx].data.length;
        cursor->nextidx++;
    }

    return;
}
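
/*
 * Example (hypothetical layout): if iov[1].data ends exactly where
 * iov[2].data begins and both are DATA/HEADER/PADDING elements, the
 * cursor's 'current' run grows to cover both, so later EVP_Cipher()
 * calls see one larger contiguous chunk.
 */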
/* Move the cursor along to the start of the next block to be
 * encrypted */
static inline void
_krb5_evp_iov_cursor_nextcrypt(struct _krb5_evp_iov_cursor *cursor)
{
    for (; cursor->nextidx < cursor->niov; cursor->nextidx++) {
        if (_krb5_evp_iov_should_encrypt(&cursor->iov[cursor->nextidx])
            && cursor->iov[cursor->nextidx].data.length != 0) {
            cursor->current = cursor->iov[cursor->nextidx].data;
            cursor->nextidx++;
            _krb5_evp_iov_cursor_expand(cursor);
            return;
        }
    }

    cursor->current.length = 0; /* No matches, so we're done here */
}

static inline void
_krb5_evp_iov_cursor_init(struct _krb5_evp_iov_cursor *cursor,
                          struct krb5_crypto_iov *iov, int niov)
{
    memset(cursor, 0, sizeof(struct _krb5_evp_iov_cursor));

    cursor->iov = iov;
    cursor->niov = niov;
    cursor->nextidx = 0;

    /* Move along to the first block we're going to be encrypting */
    _krb5_evp_iov_cursor_nextcrypt(cursor);
}

static inline void
_krb5_evp_iov_cursor_advance(struct _krb5_evp_iov_cursor *cursor,
                             size_t amount)
{
    while (amount > 0) {
        if (cursor->current.length > amount) {
            cursor->current.data = (char *)cursor->current.data + amount;
            cursor->current.length -= amount;
            return;
        }
        amount -= cursor->current.length;
        _krb5_evp_iov_cursor_nextcrypt(cursor);
    }
}

static inline int
_krb5_evp_iov_cursor_done(struct _krb5_evp_iov_cursor *cursor)
{
    return (cursor->nextidx == cursor->niov && cursor->current.length == 0);
}

/* Fill a memory buffer with data from one or more iovecs. Doesn't
 * advance the passed in cursor - use outcursor for the position
 * at the end
 */
static inline void
_krb5_evp_iov_cursor_fillbuf(struct _krb5_evp_iov_cursor *cursor,
                             unsigned char *buf, size_t length,
                             struct _krb5_evp_iov_cursor *outcursor)
{
    struct _krb5_evp_iov_cursor cursorint;

    cursorint = *cursor;

    while (length > 0 && !_krb5_evp_iov_cursor_done(&cursorint)) {
        if (cursorint.current.length > length) {
            memcpy(buf, cursorint.current.data, length);
            _krb5_evp_iov_cursor_advance(&cursorint, length);
            length = 0;
        } else {
            memcpy(buf, cursorint.current.data, cursorint.current.length);
            length -= cursorint.current.length;
            buf += cursorint.current.length;
            _krb5_evp_iov_cursor_nextcrypt(&cursorint);
        }
    }

    if (outcursor != NULL)
        *outcursor = cursorint;
}

/* Fill an iovec from a memory buffer. Always advances the cursor to
 * the end of the filled region
 */
static inline void
_krb5_evp_iov_cursor_fillvec(struct _krb5_evp_iov_cursor *cursor,
                             unsigned char *buf, size_t length)
{
    while (length > 0 && !_krb5_evp_iov_cursor_done(cursor)) {
        if (cursor->current.length > length) {
            memcpy(cursor->current.data, buf, length);
            _krb5_evp_iov_cursor_advance(cursor, length);
            length = 0;
        } else {
            memcpy(cursor->current.data, buf, cursor->current.length);
            length -= cursor->current.length;
            buf += cursor->current.length;
            _krb5_evp_iov_cursor_nextcrypt(cursor);
        }
    }
}

static size_t
_krb5_evp_iov_cryptlength(struct krb5_crypto_iov *iov, int niov)
{
    int i;
    size_t length = 0;

    for (i = 0; i < niov; i++) {
        if (_krb5_evp_iov_should_encrypt(&iov[i]))
            length += iov[i].data.length;
    }

    return length;
}
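
/*
 * Block-encrypt (or decrypt) the encryptable iovec elements in place.
 * Whole blocks within an iovec run go straight to EVP_Cipher(); a
 * trailing partial block "steals" bytes from the following elements
 * via fillbuf/fillvec.  Hypothetical example with 16-byte blocks and
 * element lengths 20 and 12: the first 16 bytes go through in one
 * call, then one block is assembled from the remaining 4 + 12 bytes,
 * ciphered, and copied back where it came from.
 */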
krb5_error_code
_krb5_evp_encrypt_iov(krb5_context context,
                      struct _krb5_key_data *key,
                      struct krb5_crypto_iov *iov,
                      int niov,
                      krb5_boolean encryptp,
                      int usage,
                      void *ivec)
{
    size_t blocksize, blockmask, wholeblocks;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    struct _krb5_evp_iov_cursor cursor;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);
    blockmask = ~(blocksize - 1);

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    _krb5_evp_iov_cursor_init(&cursor, iov, niov);

    while (!_krb5_evp_iov_cursor_done(&cursor)) {

        /* Number of bytes of data in this iovec that are in whole blocks */
        wholeblocks = cursor.current.length & blockmask;

        if (wholeblocks != 0) {
            EVP_Cipher(c, cursor.current.data,
                       cursor.current.data, wholeblocks);
            _krb5_evp_iov_cursor_advance(&cursor, wholeblocks);
        }

        /* If there's a partial block of data remaining in the current
         * iovec, steal enough from subsequent iovecs to form a whole block */
        if (cursor.current.length > 0 && cursor.current.length < blocksize) {
            /* Build up a block's worth of data in tmp, leaving the cursor
             * pointing at where we started */
            _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, blocksize, NULL);

            EVP_Cipher(c, tmp, tmp, blocksize);

            /* Copy the data in tmp back into the iovecs that it came from,
             * advancing the cursor */
            _krb5_evp_iov_cursor_fillvec(&cursor, tmp, blocksize);
        }
    }

    return 0;
}
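
/*
 * CBC with ciphertext stealing (CTS) over an iovec chain, in the
 * style used by the Kerberos AES enctypes: everything up to the last
 * partial block is chained normally, then the final two blocks get
 * the CTS dance below.  Worked example with 16-byte blocks and
 * length 37: encryption CBC-encrypts remaining = 32 bytes and leaves
 * a partiallen = 5 byte tail; decryption leaves the last two whole
 * blocks plus the 5-byte tail for the special-case processing.
 */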
krb5_error_code
_krb5_evp_encrypt_iov_cts(krb5_context context,
                          struct _krb5_key_data *key,
                          struct krb5_crypto_iov *iov,
                          int niov,
                          krb5_boolean encryptp,
                          int usage,
                          void *ivec)
{
    size_t blocksize, blockmask, wholeblocks, length;
    size_t remaining, partiallen;
    struct _krb5_evp_iov_cursor cursor, lastpos;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH], tmp2[EVP_MAX_BLOCK_LENGTH];
    unsigned char tmp3[EVP_MAX_BLOCK_LENGTH], ivec2[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    int i;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);
    blockmask = ~(blocksize - 1);

    length = _krb5_evp_iov_cryptlength(iov, niov);

    if (length < blocksize) {
        krb5_set_error_message(context, EINVAL,
                               "message block too short");
        return EINVAL;
    }

    if (length == blocksize)
        return _krb5_evp_encrypt_iov(context, key, iov, niov,
                                     encryptp, usage, ivec);

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    if (encryptp) {
        /* On our first pass, we want to process everything but the
         * final partial block */
        remaining = ((length - 1) & blockmask);
        partiallen = length - remaining;

        memset(&lastpos, 0, sizeof(lastpos)); /* Keep the compiler happy */
    } else {
        /* Decryption needs to leave 2 whole blocks and a partial for
         * further processing */
        if (length > 2 * blocksize) {
            remaining = (((length - 1) / blocksize) * blocksize) - (blocksize*2);
            partiallen = length - remaining - (blocksize * 2);
        } else {
            remaining = 0;
            partiallen = length - blocksize;
        }
    }

    _krb5_evp_iov_cursor_init(&cursor, iov, niov);
    while (remaining > 0) {
        /* If the iovec has more data than we need, just use it */
        if (cursor.current.length >= remaining) {
            EVP_Cipher(c, cursor.current.data, cursor.current.data, remaining);

            if (encryptp) {
                /* We've just encrypted the last block of data. Make a copy
                 * of it (and its location) for the CTS dance, below */
                lastpos = cursor;
                _krb5_evp_iov_cursor_advance(&lastpos, remaining - blocksize);
                memcpy(ivec2, lastpos.current.data, blocksize);
            }

            _krb5_evp_iov_cursor_advance(&cursor, remaining);
            remaining = 0;
        } else {
            /* Use as much as we can, firstly all of the whole blocks */
            wholeblocks = cursor.current.length & blockmask;

            if (wholeblocks > 0) {
                EVP_Cipher(c, cursor.current.data, cursor.current.data,
                           wholeblocks);
                _krb5_evp_iov_cursor_advance(&cursor, wholeblocks);
                remaining -= wholeblocks;
            }

            /* Then, if we have partial data left, steal enough from subsequent
             * iovecs to make a whole block */
            if (cursor.current.length > 0 && cursor.current.length < blocksize) {
                if (encryptp && remaining == blocksize)
                    lastpos = cursor;

                _krb5_evp_iov_cursor_fillbuf(&cursor, ivec2, blocksize, NULL);
                EVP_Cipher(c, ivec2, ivec2, blocksize);
                _krb5_evp_iov_cursor_fillvec(&cursor, ivec2, blocksize);

                remaining -= blocksize;
            }
        }
    }

    /* Encryption */
    if (encryptp) {
        /* Copy the partial block into tmp */
        _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, partiallen, NULL);

        /* XOR the final partial block with ivec2 */
        for (i = 0; i < partiallen; i++)
            tmp[i] = tmp[i] ^ ivec2[i];
        for (; i < blocksize; i++)
            tmp[i] = 0 ^ ivec2[i]; /* XOR 0s if partial block exhausted */

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, tmp, tmp, blocksize);

        _krb5_evp_iov_cursor_fillvec(&lastpos, tmp, blocksize);
        _krb5_evp_iov_cursor_fillvec(&cursor, ivec2, partiallen);

        if (ivec)
            memcpy(ivec, tmp, blocksize);

        return 0;
    }
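
    /*
     * Decryption: the chained pass above has left two whole blocks
     * and the partial tail untouched.  Keep the ciphertext of the
     * second-to-last whole block in ivec2 (falling back to ivec or
     * zero_ivec for short messages), decrypt the last whole block
     * with a zero IV into tmp2, use its tail to reconstruct the
     * stolen bytes in tmp3, and XOR with ivec2 to recover the final
     * full plaintext block.  tmp carries the ciphertext block that
     * becomes the output chaining value.
     */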
    /* Decryption */

    /* Make a copy of the 2nd last full ciphertext block in ivec2 before
     * decrypting it. If no such block exists, use ivec or zero_ivec */
    if (length <= blocksize * 2) {
        if (ivec)
            memcpy(ivec2, ivec, blocksize);
        else
            memcpy(ivec2, zero_ivec, blocksize);
    } else {
        _krb5_evp_iov_cursor_fillbuf(&cursor, ivec2, blocksize, NULL);
        EVP_Cipher(c, tmp, ivec2, blocksize);
        _krb5_evp_iov_cursor_fillvec(&cursor, tmp, blocksize);
    }

    lastpos = cursor; /* Remember where the last block is */
    _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, blocksize, &cursor);
    EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
    EVP_Cipher(c, tmp2, tmp, blocksize); /* tmp eventually becomes output ivec */

    _krb5_evp_iov_cursor_fillbuf(&cursor, tmp3, partiallen, NULL);

    memcpy(tmp3 + partiallen, tmp2 + partiallen, blocksize - partiallen); /* xor 0 */
    for (i = 0; i < partiallen; i++)
        tmp2[i] = tmp2[i] ^ tmp3[i];

    _krb5_evp_iov_cursor_fillvec(&cursor, tmp2, partiallen);

    EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
    EVP_Cipher(c, tmp3, tmp3, blocksize);

    for (i = 0; i < blocksize; i++)
        tmp3[i] ^= ivec2[i];

    _krb5_evp_iov_cursor_fillvec(&lastpos, tmp3, blocksize);

    if (ivec)
        memcpy(ivec, tmp, blocksize);

    return 0;
}
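
/*
 * Contiguous-buffer variant of the CTS routine above; the iovec
 * version generalises the same two-pass structure to scattered data.
 */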
krb5_error_code
_krb5_evp_encrypt_cts(krb5_context context,
                      struct _krb5_key_data *key,
                      void *data,
                      size_t len,
                      krb5_boolean encryptp,
                      int usage,
                      void *ivec)
{
    size_t i, blocksize;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH], ivec2[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    unsigned char *p;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);

    if (len < blocksize) {
        krb5_set_error_message(context, EINVAL,
                               "message block too short");
        return EINVAL;
    } else if (len == blocksize) {
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, data, data, len);
        return 0;
    }

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    if (encryptp) {

        p = data;
        i = ((len - 1) / blocksize) * blocksize;
        EVP_Cipher(c, p, p, i);
        p += i - blocksize;
        len -= i;
        memcpy(ivec2, p, blocksize);

        for (i = 0; i < len; i++)
            tmp[i] = p[i + blocksize] ^ ivec2[i];
        for (; i < blocksize; i++)
            tmp[i] = 0 ^ ivec2[i];

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, p, tmp, blocksize);

        memcpy(p + blocksize, ivec2, len);
        if (ivec)
            memcpy(ivec, p, blocksize);
    } else {
        unsigned char tmp2[EVP_MAX_BLOCK_LENGTH], tmp3[EVP_MAX_BLOCK_LENGTH];

        p = data;
        if (len > blocksize * 2) {
            /* remove last two blocks and round up, decrypt this with cbc, then do cts dance */
            i = ((((len - blocksize * 2) + blocksize - 1) / blocksize) * blocksize);
            memcpy(ivec2, p + i - blocksize, blocksize);
            EVP_Cipher(c, p, p, i);
            p += i;
            len -= i + blocksize;
        } else {
            if (ivec)
                memcpy(ivec2, ivec, blocksize);
            else
                memcpy(ivec2, zero_ivec, blocksize);
            len -= blocksize;
        }

        memcpy(tmp, p, blocksize);
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, tmp2, p, blocksize);

        memcpy(tmp3, p + blocksize, len);
        memcpy(tmp3 + len, tmp2 + len, blocksize - len); /* xor 0 */

        for (i = 0; i < len; i++)
            p[i + blocksize] = tmp2[i] ^ tmp3[i];

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, p, tmp3, blocksize);

        for (i = 0; i < blocksize; i++)
            p[i] ^= ivec2[i];
        if (ivec)
            memcpy(ivec, tmp, blocksize);
    }

    return 0;
}