rename find_buffer
[heimdal.git] / lib / gssapi / krb5 / cfx.c
blobc9e1646e7058a5b5782548b38567af93a3a967bf
1 /*
2 * Copyright (c) 2003, PADL Software Pty Ltd.
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of PADL Software nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
33 #include "gsskrb5_locl.h"
36 * Implementation of RFC 4121
39 #define CFXSentByAcceptor (1 << 0)
40 #define CFXSealed (1 << 1)
41 #define CFXAcceptorSubkey (1 << 2)
43 krb5_error_code
44 _gsskrb5cfx_wrap_length_cfx(krb5_context context,
45 krb5_crypto crypto,
46 int conf_req_flag,
47 size_t input_length,
48 size_t *output_length,
49 size_t *cksumsize,
50 uint16_t *padlength)
52 krb5_error_code ret;
53 krb5_cksumtype type;
55 /* 16-byte header is always first */
56 *output_length = sizeof(gss_cfx_wrap_token_desc);
57 *padlength = 0;
59 ret = krb5_crypto_get_checksum_type(context, crypto, &type);
60 if (ret)
61 return ret;
63 ret = krb5_checksumsize(context, type, cksumsize);
64 if (ret)
65 return ret;
67 if (conf_req_flag) {
68 size_t padsize;
70 /* Header is concatenated with data before encryption */
71 input_length += sizeof(gss_cfx_wrap_token_desc);
73 ret = krb5_crypto_getpadsize(context, crypto, &padsize);
74 if (ret) {
75 return ret;
77 if (padsize > 1) {
78 /* XXX check this */
79 *padlength = padsize - (input_length % padsize);
81 /* We add the pad ourselves (noted here for completeness only) */
82 input_length += *padlength;
85 *output_length += krb5_get_wrapped_length(context,
86 crypto, input_length);
87 } else {
88 /* Checksum is concatenated with data */
89 *output_length += input_length + *cksumsize;
92 assert(*output_length > input_length);
94 return 0;
97 OM_uint32
98 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
99 const gsskrb5_ctx ctx,
100 krb5_context context,
101 int conf_req_flag,
102 gss_qop_t qop_req,
103 OM_uint32 req_output_size,
104 OM_uint32 *max_input_size)
106 krb5_error_code ret;
108 *max_input_size = 0;
110 /* 16-byte header is always first */
111 if (req_output_size < 16)
112 return 0;
113 req_output_size -= 16;
115 if (conf_req_flag) {
116 size_t wrapped_size, sz;
118 wrapped_size = req_output_size + 1;
119 do {
120 wrapped_size--;
121 sz = krb5_get_wrapped_length(context,
122 ctx->crypto, wrapped_size);
123 } while (wrapped_size && sz > req_output_size);
124 if (wrapped_size == 0)
125 return 0;
127 /* inner header */
128 if (wrapped_size < 16)
129 return 0;
131 wrapped_size -= 16;
133 *max_input_size = wrapped_size;
134 } else {
135 krb5_cksumtype type;
136 size_t cksumsize;
138 ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
139 if (ret)
140 return ret;
142 ret = krb5_checksumsize(context, type, &cksumsize);
143 if (ret)
144 return ret;
146 if (req_output_size < cksumsize)
147 return 0;
149 /* Checksum is concatenated with data */
150 *max_input_size = req_output_size - cksumsize;
153 return 0;
157 * Rotate "rrc" bytes to the front or back
160 static krb5_error_code
161 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
163 u_char *tmp, buf[256];
164 size_t left;
166 if (len == 0)
167 return 0;
169 rrc %= len;
171 if (rrc == 0)
172 return 0;
174 left = len - rrc;
176 if (rrc <= sizeof(buf)) {
177 tmp = buf;
178 } else {
179 tmp = malloc(rrc);
180 if (tmp == NULL)
181 return ENOMEM;
184 if (unrotate) {
185 memcpy(tmp, data, rrc);
186 memmove(data, (u_char *)data + rrc, left);
187 memcpy((u_char *)data + left, tmp, rrc);
188 } else {
189 memcpy(tmp, (u_char *)data + left, rrc);
190 memmove((u_char *)data + rrc, data, left);
191 memcpy(data, tmp, rrc);
194 if (rrc > sizeof(buf))
195 free(tmp);
197 return 0;
200 gss_iov_buffer_desc *
201 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
203 int i;
205 for (i = 0; i < iov_count; i++)
206 if (type == GSS_IOV_BUFFER_TYPE(iov[i].type))
207 return &iov[i];
208 return NULL;
211 static OM_uint32
212 allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
214 if (buffer->type & GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATED) {
215 if (buffer->buffer.length == size)
216 return GSS_S_COMPLETE;
217 free(buffer->buffer.value);
220 buffer->buffer.value = malloc(size);
221 buffer->buffer.length = size;
222 if (buffer->buffer.value == NULL) {
223 *minor_status = ENOMEM;
224 return GSS_S_FAILURE;
226 buffer->type |= GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATED;
228 return GSS_S_COMPLETE;
233 OM_uint32
234 _gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
235 gsskrb5_ctx ctx,
236 krb5_context context,
237 int conf_req_flag,
238 int *conf_state,
239 gss_iov_buffer_desc *iov,
240 int iov_count)
242 OM_uint32 major_status, junk;
243 gss_iov_buffer_desc *header, *trailer, *padding;
244 size_t gsshsize, k5hsize;
245 size_t gsstsize, k5tsize;
246 size_t i, padlength, rrc = 0, ec = 0;
247 gss_cfx_wrap_token token;
248 krb5_error_code ret;
249 int32_t seq_number;
250 unsigned usage;
251 krb5_crypto_iov *data = NULL;
252 int paddingoffset = 0;
254 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
255 if (header == NULL) {
256 *minor_status = EINVAL;
257 return GSS_S_FAILURE;
260 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_PADDING, &padlength);
262 padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
263 if (padlength != 0 && padding == NULL) {
264 *minor_status = EINVAL;
265 return GSS_S_FAILURE;
268 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
270 if (conf_req_flag) {
271 ec = padlength;
273 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
274 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
276 gsshsize = k5hsize + sizeof(*token);
277 gsstsize = k5tsize + sizeof(*token); /* encrypted token stored in trailer */
279 } else {
281 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_CHECKSUM, &k5tsize);
283 gsshsize = sizeof(*token);
284 gsstsize = k5tsize;
291 if (trailer == NULL) {
292 /* conf_req_flag=0 doesn't support DCE_STYLE */
293 if (conf_req_flag == 0) {
294 *minor_status = EINVAL;
295 major_status = GSS_S_FAILURE;
296 goto failure;
298 rrc = gsstsize;
299 if (IS_DCE_STYLE(ctx))
300 rrc -= ec;
301 gsshsize += gsstsize;
302 gsstsize = 0;
303 } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATE) {
304 major_status = allocate_buffer(minor_status, trailer, gsstsize);
305 if (major_status)
306 goto failure;
307 } else if (trailer->buffer.length < gsstsize) {
308 *minor_status = KRB5_BAD_MSIZE;
309 major_status = GSS_S_FAILURE;
310 goto failure;
311 } else
312 trailer->buffer.length = gsstsize;
318 if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATE) {
319 major_status = allocate_buffer(minor_status, header, gsshsize);
320 if (major_status != GSS_S_COMPLETE)
321 goto failure;
322 } else if (header->buffer.length < gsshsize) {
323 *minor_status = KRB5_BAD_MSIZE;
324 major_status = GSS_S_FAILURE;
325 goto failure;
326 } else
327 header->buffer.length = gsshsize;
329 token = (gss_cfx_wrap_token)header->buffer.value;
331 token->TOK_ID[0] = 0x05;
332 token->TOK_ID[1] = 0x04;
333 token->Flags = 0;
334 token->Filler = 0xFF;
336 if (ctx->more_flags & ACCEPTOR_SUBKEY)
337 token->Flags |= CFXAcceptorSubkey;
339 if (ctx->more_flags & LOCAL)
340 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
341 else
342 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
344 if (conf_req_flag) {
346 * In Wrap tokens with confidentiality, the EC field is
347 * used to encode the size (in bytes) of the random filler.
349 token->Flags |= CFXSealed;
350 token->EC[0] = (padlength >> 8) & 0xFF;
351 token->EC[1] = (padlength >> 0) & 0xFF;
353 } else {
355 * In Wrap tokens without confidentiality, the EC field is
356 * used to encode the size (in bytes) of the trailing
357 * checksum.
359 * This is not used in the checksum calcuation itself,
360 * because the checksum length could potentially vary
361 * depending on the data length.
363 token->EC[0] = 0;
364 token->EC[1] = 0;
368 * In Wrap tokens that provide for confidentiality, the RRC
369 * field in the header contains the hex value 00 00 before
370 * encryption.
372 * In Wrap tokens that do not provide for confidentiality,
373 * both the EC and RRC fields in the appended checksum
374 * contain the hex value 00 00 for the purpose of calculating
375 * the checksum.
377 token->RRC[0] = 0;
378 token->RRC[1] = 0;
380 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
381 krb5_auth_con_getlocalseqnumber(context,
382 ctx->auth_context,
383 &seq_number);
384 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
385 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
386 krb5_auth_con_setlocalseqnumber(context,
387 ctx->auth_context,
388 ++seq_number);
389 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
391 data = calloc(iov_count + 3, sizeof(data[0]));
392 if (data == NULL) {
393 *minor_status = ENOMEM;
394 major_status = GSS_S_FAILURE;
395 goto failure;
398 if (conf_req_flag) {
400 plain packet:
402 {"header" | encrypt(plaintext-data | padding | E"header")}
404 Expanded, this is with with RRC = 0:
406 {"header" | krb5-header | plaintext-data | padding | E"header" | krb5-trailer }
408 In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(padding | E"header" | krb5-trailer)
410 {"header" | padding | E"header" | krb5-trailer | krb5-header | plaintext-data }
413 i = 0;
414 data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
415 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
416 data[i].data.length = k5hsize;
418 for (i = 1; i < iov_count + 1; i++) {
419 switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
420 case GSS_IOV_BUFFER_TYPE_DATA:
421 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
422 break;
423 case GSS_IOV_BUFFER_TYPE_PADDING:
424 data[i].flags = KRB5_CRYPTO_TYPE_PADDING;
425 paddingoffset = i;
426 break;
427 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
428 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
429 break;
430 default:
431 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
432 break;
434 data[i].data.length = iov[i - 1].buffer.length;
435 data[i].data.data = iov[i - 1].buffer.value;
439 * Any necessary padding is added here to ensure that the
440 * encrypted token header is always at the end of the
441 * ciphertext.
444 /* XXX KRB5_CRYPTO_TYPE_PADDING */
446 /* encrypted CFX header in trailer (or after the header if in
447 DCE mode). Copy in header into E"header"
449 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
450 if (trailer)
451 data[i].data.data = trailer->buffer.value;
452 else
453 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize - k5tsize - sizeof(*token);
455 data[i].data.length = sizeof(*token);
456 memcpy(data[i].data.data, token, sizeof(*token));
457 i++;
459 /* Kerberos trailer comes after the gss trailer */
460 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
461 data[i].data.data = ((uint8_t *)data[i-1].data.data) + sizeof(*token);
462 data[i].data.length = k5tsize;
463 i++;
465 ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
466 if (ret != 0) {
467 *minor_status = ret;
468 major_status = GSS_S_FAILURE;
469 goto failure;
472 if (rrc) {
473 token->RRC[0] = (rrc >> 8) & 0xFF;
474 token->RRC[1] = (rrc >> 0) & 0xFF;
477 if (paddingoffset)
478 padding->buffer.length = data[paddingoffset].data.length;
480 } else {
482 plain packet:
484 {data | "header" | gss-trailer (krb5 checksum)
486 don't do RRC != 0
490 for (i = 0; i < iov_count; i++) {
491 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
492 case GSS_IOV_BUFFER_TYPE_DATA:
493 case GSS_IOV_BUFFER_TYPE_PADDING:
494 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
495 break;
496 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
497 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
498 break;
499 default:
500 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
501 break;
503 data[i].data.length = iov[i].buffer.length;
504 data[i].data.data = iov[i].buffer.value;
507 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
508 data[i].data.data = header->buffer.value;
509 data[i].data.length = header->buffer.length;
510 i++;
512 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
513 data[i].data.data = trailer->buffer.value;
514 data[i].data.length = trailer->buffer.length;
515 i++;
517 ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
518 if (ret) {
519 *minor_status = ret;
520 major_status = GSS_S_FAILURE;
521 goto failure;
524 token->EC[0] = (trailer->buffer.length >> 8) & 0xFF;
525 token->EC[1] = (trailer->buffer.length >> 0) & 0xFF;
528 if (conf_state != NULL)
529 *conf_state = conf_req_flag;
531 free(data);
533 *minor_status = 0;
534 return GSS_S_COMPLETE;
536 failure:
537 if (data)
538 free(data);
540 gss_release_iov_buffer(&junk, iov, iov_count);
542 return major_status;
545 /* This is slowpath */
546 static OM_uint32
547 unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
549 uint8_t *p, *q;
550 size_t len = 0, skip;
551 int i;
553 for (i = 0; i < iov_count; i++)
554 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
555 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
556 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
557 len += iov[i].buffer.length;
559 p = malloc(len);
560 if (p == NULL) {
561 *minor_status = ENOMEM;
562 return GSS_S_FAILURE;
564 q = p;
566 /* copy up */
568 for (i = 0; i < iov_count; i++) {
569 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
570 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
571 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
573 memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
574 q += iov[i].buffer.length;
577 assert((q - p) == len);
579 /* unrotate first part */
580 q = p + rrc;
581 skip = rrc;
582 for (i = 0; i < iov_count; i++) {
583 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
584 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
585 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
587 if (iov[i].buffer.length <= skip) {
588 skip -= iov[i].buffer.length;
589 } else {
590 memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
591 q += iov[i].buffer.length - skip;
592 skip = 0;
596 /* copy trailer */
597 q = p;
598 skip = rrc;
599 for (i = 0; i < iov_count; i++) {
600 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
601 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
602 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
604 memcpy(q, iov[i].buffer.value, MIN(iov[i].buffer.length, skip));
605 if (iov[i].buffer.length > skip)
606 break;
607 skip -= iov[i].buffer.length;
608 q += iov[i].buffer.length;
611 return GSS_S_COMPLETE;
615 OM_uint32
616 _gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
617 gsskrb5_ctx ctx,
618 krb5_context context,
619 int *conf_state,
620 gss_qop_t *qop_state,
621 gss_iov_buffer_desc *iov,
622 int iov_count)
624 OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
625 gss_iov_buffer_desc *header, *trailer;
626 gss_cfx_wrap_token token, ttoken;
627 u_char token_flags;
628 krb5_error_code ret;
629 unsigned usage;
630 uint16_t ec, rrc;
631 krb5_crypto_iov *data = NULL;
632 int i, j;
634 *minor_status = 0;
636 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
637 if (header == NULL) {
638 *minor_status = EINVAL;
639 return GSS_S_FAILURE;
642 if (header->buffer.length < sizeof(*token)) /* we check exact below */
643 return GSS_S_DEFECTIVE_TOKEN;
645 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
647 token = (gss_cfx_wrap_token)header->buffer.value;
649 if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
650 return GSS_S_DEFECTIVE_TOKEN;
652 /* Ignore unknown flags */
653 token_flags = token->Flags &
654 (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
656 if (token_flags & CFXSentByAcceptor) {
657 if ((ctx->more_flags & LOCAL) == 0)
658 return GSS_S_DEFECTIVE_TOKEN;
661 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
662 if ((token_flags & CFXAcceptorSubkey) == 0)
663 return GSS_S_DEFECTIVE_TOKEN;
664 } else {
665 if (token_flags & CFXAcceptorSubkey)
666 return GSS_S_DEFECTIVE_TOKEN;
669 if (token->Filler != 0xFF)
670 return GSS_S_DEFECTIVE_TOKEN;
672 if (conf_state != NULL)
673 *conf_state = (token_flags & CFXSealed) ? 1 : 0;
675 ec = (token->EC[0] << 8) | token->EC[1];
676 rrc = (token->RRC[0] << 8) | token->RRC[1];
679 * Check sequence number
681 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
682 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
683 if (seq_number_hi) {
684 /* no support for 64-bit sequence numbers */
685 *minor_status = ERANGE;
686 return GSS_S_UNSEQ_TOKEN;
689 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
690 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
691 if (ret != 0) {
692 *minor_status = 0;
693 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
694 return ret;
696 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
699 * Decrypt and/or verify checksum
702 if (ctx->more_flags & LOCAL) {
703 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
704 } else {
705 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
708 data = calloc(iov_count + 3, sizeof(data[0]));
709 if (data == NULL) {
710 *minor_status = ENOMEM;
711 major_status = GSS_S_FAILURE;
712 goto failure;
715 if (token_flags & CFXSealed) {
716 size_t k5tsize, k5hsize;
718 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
719 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
721 /* Rotate by RRC; bogus to do this in-place XXX */
722 /* Check RRC */
724 if (trailer == NULL) {
725 size_t gsstsize = k5tsize + sizeof(*token);
726 size_t gsshsize = k5hsize + sizeof(*token);
728 if (IS_DCE_STYLE(ctx))
729 gsstsize += ec;
730 gsshsize += gsstsize;
732 if (rrc != gsstsize) {
733 major_status = GSS_S_DEFECTIVE_TOKEN;
734 goto failure;
736 if (header->buffer.length != gsshsize) {
737 major_status = GSS_S_DEFECTIVE_TOKEN;
738 goto failure;
740 } else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
741 major_status = GSS_S_DEFECTIVE_TOKEN;
742 goto failure;
743 } else if (header->buffer.length != sizeof(*token) + k5hsize) {
744 major_status = GSS_S_DEFECTIVE_TOKEN;
745 goto failure;
746 } else if (rrc != 0) {
747 /* go though slowpath */
748 major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
749 if (major_status)
750 goto failure;
753 i = 0;
754 data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
755 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
756 data[i].data.length = k5hsize;
757 i++;
759 for (j = 0; j < iov_count; i++, j++) {
760 switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
761 case GSS_IOV_BUFFER_TYPE_DATA:
762 case GSS_IOV_BUFFER_TYPE_PADDING:
763 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
764 break;
765 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
766 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
767 break;
768 default:
769 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
770 break;
772 data[i].data.length = iov[j].buffer.length;
773 data[i].data.data = iov[j].buffer.value;
776 /* encrypted CFX header in trailer (or after the header if in
777 DCE mode). Copy in header into E"header"
779 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
780 if (trailer)
781 data[i].data.data = trailer->buffer.value;
782 else
783 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize - k5tsize - sizeof(*token);
784 data[i].data.length = sizeof(*token);
785 ttoken = (gss_cfx_wrap_token)data[i].data.data;
786 i++;
788 /* Kerberos trailer comes after the gss trailer */
789 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
790 data[i].data.data = ((uint8_t *)data[i-1].data.data) + sizeof(*token);
791 data[i].data.length = k5tsize;
792 i++;
794 ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
795 if (ret != 0) {
796 *minor_status = ret;
797 major_status = GSS_S_FAILURE;
798 goto failure;
801 ttoken->RRC[0] = token->RRC[0];
802 ttoken->RRC[1] = token->RRC[1];
804 /* Check the integrity of the header */
805 if (memcmp(ttoken, token, sizeof(*token)) != 0) {
806 major_status = GSS_S_BAD_MIC;
807 goto failure;
809 } else {
810 /* Check RRC */
811 if (rrc != 0) {
812 *minor_status = EINVAL;
813 major_status = GSS_S_FAILURE;
814 goto failure;
817 if (trailer == NULL) {
818 *minor_status = EINVAL;
819 major_status = GSS_S_FAILURE;
820 goto failure;
823 if (trailer->buffer.length != ec) {
824 *minor_status = EINVAL;
825 major_status = GSS_S_FAILURE;
826 goto failure;
829 for (i = 0; i < iov_count; i++) {
830 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
831 case GSS_IOV_BUFFER_TYPE_DATA:
832 case GSS_IOV_BUFFER_TYPE_PADDING:
833 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
834 break;
835 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
836 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
837 break;
838 default:
839 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
840 break;
842 data[i].data.length = iov[i].buffer.length;
843 data[i].data.data = iov[i].buffer.value;
846 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
847 data[i].data.data = header->buffer.value;
848 data[i].data.length = header->buffer.length;
849 i++;
851 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
852 data[i].data.data = trailer->buffer.value;
853 data[i].data.length = trailer->buffer.length;
854 i++;
856 token = (gss_cfx_wrap_token)header->buffer.value;
857 token->EC[0] = 0;
858 token->EC[1] = 0;
859 token->RRC[0] = 0;
860 token->RRC[1] = 0;
862 ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
863 if (ret) {
864 *minor_status = ret;
865 major_status = GSS_S_FAILURE;
866 goto failure;
870 if (qop_state != NULL) {
871 *qop_state = GSS_C_QOP_DEFAULT;
874 free(data);
876 *minor_status = 0;
877 return GSS_S_COMPLETE;
879 failure:
880 if (data)
881 free(data);
883 gss_release_iov_buffer(&junk, iov, iov_count);
885 return major_status;
888 OM_uint32
889 _gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
890 gsskrb5_ctx ctx,
891 krb5_context context,
892 int conf_req_flag,
893 gss_qop_t qop_req,
894 int *conf_state,
895 gss_iov_buffer_desc *iov,
896 int iov_count)
898 size_t size;
899 int i;
900 size_t *padding = NULL;
902 GSSAPI_KRB5_INIT (&context);
903 *minor_status = 0;
905 for (size = 0, i = 0; i < iov_count; i++) {
906 switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
907 case GSS_IOV_BUFFER_TYPE_EMPTY:
908 break;
909 case GSS_IOV_BUFFER_TYPE_DATA:
910 size += iov[i].buffer.length;
911 break;
912 case GSS_IOV_BUFFER_TYPE_HEADER:
913 *minor_status = krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &iov[i].buffer.length);
914 if (*minor_status)
915 return GSS_S_FAILURE;
916 break;
917 case GSS_IOV_BUFFER_TYPE_TRAILER:
918 *minor_status = krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &iov[i].buffer.length);
919 if (*minor_status)
920 return GSS_S_FAILURE;
921 break;
922 case GSS_IOV_BUFFER_TYPE_PADDING:
923 if (padding != NULL) {
924 *minor_status = 0;
925 return GSS_S_FAILURE;
927 padding = &iov[i].buffer.length;
928 break;
929 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
930 break;
931 default:
932 *minor_status = EINVAL;
933 return GSS_S_FAILURE;
936 if (padding) {
937 size_t pad;
938 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_PADDING, &pad);
939 if (pad > 1) {
940 *padding = pad - (size % pad);
941 if (*padding == pad)
942 *padding = 0;
943 } else
944 *padding = 0;
947 return GSS_S_COMPLETE;
953 OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
954 const gsskrb5_ctx ctx,
955 krb5_context context,
956 int conf_req_flag,
957 const gss_buffer_t input_message_buffer,
958 int *conf_state,
959 gss_buffer_t output_message_buffer)
961 gss_cfx_wrap_token token;
962 krb5_error_code ret;
963 unsigned usage;
964 krb5_data cipher;
965 size_t wrapped_len, cksumsize;
966 uint16_t padlength, rrc = 0;
967 int32_t seq_number;
968 u_char *p;
970 ret = _gsskrb5cfx_wrap_length_cfx(context,
971 ctx->crypto, conf_req_flag,
972 input_message_buffer->length,
973 &wrapped_len, &cksumsize, &padlength);
974 if (ret != 0) {
975 *minor_status = ret;
976 return GSS_S_FAILURE;
979 /* Always rotate encrypted token (if any) and checksum to header */
980 rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
982 output_message_buffer->length = wrapped_len;
983 output_message_buffer->value = malloc(output_message_buffer->length);
984 if (output_message_buffer->value == NULL) {
985 *minor_status = ENOMEM;
986 return GSS_S_FAILURE;
989 p = output_message_buffer->value;
990 token = (gss_cfx_wrap_token)p;
991 token->TOK_ID[0] = 0x05;
992 token->TOK_ID[1] = 0x04;
993 token->Flags = 0;
994 token->Filler = 0xFF;
995 if ((ctx->more_flags & LOCAL) == 0)
996 token->Flags |= CFXSentByAcceptor;
997 if (ctx->more_flags & ACCEPTOR_SUBKEY)
998 token->Flags |= CFXAcceptorSubkey;
999 if (conf_req_flag) {
1001 * In Wrap tokens with confidentiality, the EC field is
1002 * used to encode the size (in bytes) of the random filler.
1004 token->Flags |= CFXSealed;
1005 token->EC[0] = (padlength >> 8) & 0xFF;
1006 token->EC[1] = (padlength >> 0) & 0xFF;
1007 } else {
1009 * In Wrap tokens without confidentiality, the EC field is
1010 * used to encode the size (in bytes) of the trailing
1011 * checksum.
1013 * This is not used in the checksum calcuation itself,
1014 * because the checksum length could potentially vary
1015 * depending on the data length.
1017 token->EC[0] = 0;
1018 token->EC[1] = 0;
1022 * In Wrap tokens that provide for confidentiality, the RRC
1023 * field in the header contains the hex value 00 00 before
1024 * encryption.
1026 * In Wrap tokens that do not provide for confidentiality,
1027 * both the EC and RRC fields in the appended checksum
1028 * contain the hex value 00 00 for the purpose of calculating
1029 * the checksum.
1031 token->RRC[0] = 0;
1032 token->RRC[1] = 0;
1034 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1035 krb5_auth_con_getlocalseqnumber(context,
1036 ctx->auth_context,
1037 &seq_number);
1038 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
1039 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1040 krb5_auth_con_setlocalseqnumber(context,
1041 ctx->auth_context,
1042 ++seq_number);
1043 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1046 * If confidentiality is requested, the token header is
1047 * appended to the plaintext before encryption; the resulting
1048 * token is {"header" | encrypt(plaintext | pad | "header")}.
1050 * If no confidentiality is requested, the checksum is
1051 * calculated over the plaintext concatenated with the
1052 * token header.
1054 if (ctx->more_flags & LOCAL) {
1055 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1056 } else {
1057 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1060 if (conf_req_flag) {
1062 * Any necessary padding is added here to ensure that the
1063 * encrypted token header is always at the end of the
1064 * ciphertext.
1066 * The specification does not require that the padding
1067 * bytes are initialized.
1069 p += sizeof(*token);
1070 memcpy(p, input_message_buffer->value, input_message_buffer->length);
1071 memset(p + input_message_buffer->length, 0xFF, padlength);
1072 memcpy(p + input_message_buffer->length + padlength,
1073 token, sizeof(*token));
1075 ret = krb5_encrypt(context, ctx->crypto,
1076 usage, p,
1077 input_message_buffer->length + padlength +
1078 sizeof(*token),
1079 &cipher);
1080 if (ret != 0) {
1081 *minor_status = ret;
1082 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1083 return GSS_S_FAILURE;
1085 assert(sizeof(*token) + cipher.length == wrapped_len);
1086 token->RRC[0] = (rrc >> 8) & 0xFF;
1087 token->RRC[1] = (rrc >> 0) & 0xFF;
1089 ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
1090 if (ret != 0) {
1091 *minor_status = ret;
1092 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1093 return GSS_S_FAILURE;
1095 memcpy(p, cipher.data, cipher.length);
1096 krb5_data_free(&cipher);
1097 } else {
1098 char *buf;
1099 Checksum cksum;
1101 buf = malloc(input_message_buffer->length + sizeof(*token));
1102 if (buf == NULL) {
1103 *minor_status = ENOMEM;
1104 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1105 return GSS_S_FAILURE;
1107 memcpy(buf, input_message_buffer->value, input_message_buffer->length);
1108 memcpy(buf + input_message_buffer->length, token, sizeof(*token));
1110 ret = krb5_create_checksum(context, ctx->crypto,
1111 usage, 0, buf,
1112 input_message_buffer->length +
1113 sizeof(*token),
1114 &cksum);
1115 if (ret != 0) {
1116 *minor_status = ret;
1117 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1118 free(buf);
1119 return GSS_S_FAILURE;
1122 free(buf);
1124 assert(cksum.checksum.length == cksumsize);
1125 token->EC[0] = (cksum.checksum.length >> 8) & 0xFF;
1126 token->EC[1] = (cksum.checksum.length >> 0) & 0xFF;
1127 token->RRC[0] = (rrc >> 8) & 0xFF;
1128 token->RRC[1] = (rrc >> 0) & 0xFF;
1130 p += sizeof(*token);
1131 memcpy(p, input_message_buffer->value, input_message_buffer->length);
1132 memcpy(p + input_message_buffer->length,
1133 cksum.checksum.data, cksum.checksum.length);
1135 ret = rrc_rotate(p,
1136 input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
1137 if (ret != 0) {
1138 *minor_status = ret;
1139 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1140 free_Checksum(&cksum);
1141 return GSS_S_FAILURE;
1143 free_Checksum(&cksum);
1146 if (conf_state != NULL) {
1147 *conf_state = conf_req_flag;
1150 *minor_status = 0;
1151 return GSS_S_COMPLETE;
/*
 * _gssapi_unwrap_cfx: unwrap an RFC 4121 (CFX) Wrap token.
 *
 * Validates the token header (TOK_ID 05 04, flags, 0xFF filler),
 * enforces the message-order/sequence window, then either decrypts the
 * payload (CFXSealed) or verifies the keyed checksum that covers
 * (plaintext-data | "header").  On success, output_message_buffer is
 * set to the recovered plaintext and *conf_state reports whether the
 * token was sealed.
 */
1154 OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
1155 const gsskrb5_ctx ctx,
1156 krb5_context context,
1157 const gss_buffer_t input_message_buffer,
1158 gss_buffer_t output_message_buffer,
1159 int *conf_state,
1160 gss_qop_t *qop_state)
1162 gss_cfx_wrap_token token;
1163 u_char token_flags;
1164 krb5_error_code ret;
1165 unsigned usage;
1166 krb5_data data;
1167 uint16_t ec, rrc;
1168 OM_uint32 seq_number_lo, seq_number_hi;
1169 size_t len;
1170 u_char *p;
1172 *minor_status = 0;
/* The input must at least hold the fixed-size token header */
1174 if (input_message_buffer->length < sizeof(*token)) {
1175 return GSS_S_DEFECTIVE_TOKEN;
1178 p = input_message_buffer->value;
1180 token = (gss_cfx_wrap_token)p;
1182 if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
1183 return GSS_S_DEFECTIVE_TOKEN;
1186 /* Ignore unknown flags */
1187 token_flags = token->Flags &
1188 (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
/* A token marked sent-by-acceptor may only arrive at the initiator */
1190 if (token_flags & CFXSentByAcceptor) {
1191 if ((ctx->more_flags & LOCAL) == 0)
1192 return GSS_S_DEFECTIVE_TOKEN;
/* The token's subkey flag must agree with the context's subkey state */
1195 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1196 if ((token_flags & CFXAcceptorSubkey) == 0)
1197 return GSS_S_DEFECTIVE_TOKEN;
1198 } else {
1199 if (token_flags & CFXAcceptorSubkey)
1200 return GSS_S_DEFECTIVE_TOKEN;
1203 if (token->Filler != 0xFF) {
1204 return GSS_S_DEFECTIVE_TOKEN;
1207 if (conf_state != NULL) {
1208 *conf_state = (token_flags & CFXSealed) ? 1 : 0;
/* EC (extra count) and RRC (right rotation count) are big-endian 16-bit */
1211 ec = (token->EC[0] << 8) | token->EC[1];
1212 rrc = (token->RRC[0] << 8) | token->RRC[1];
1215 * Check sequence number
1217 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1218 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1219 if (seq_number_hi) {
1220 /* no support for 64-bit sequence numbers */
1221 *minor_status = ERANGE;
1222 return GSS_S_UNSEQ_TOKEN;
/* Replay/order check must run under the context lock */
1225 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1226 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1227 if (ret != 0) {
1228 *minor_status = 0;
1229 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/* NOTE(review): output_message_buffer has not been allocated by this
 * function at this point; releasing it here assumes the caller passed
 * in an initialized (e.g. zeroed) buffer -- verify against callers. */
1230 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1231 return ret;
1233 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1236 * Decrypt and/or verify checksum
1239 if (ctx->more_flags & LOCAL) {
1240 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1241 } else {
1242 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
/* len = everything after the token header */
1245 p += sizeof(*token);
1246 len = input_message_buffer->length;
1247 len -= (p - (u_char *)input_message_buffer->value);
1249 /* Rotate by RRC; bogus to do this in-place XXX */
1250 *minor_status = rrc_rotate(p, len, rrc, TRUE);
1251 if (*minor_status != 0) {
1252 return GSS_S_FAILURE;
/* Sealed token: payload is encrypt(plaintext | header); after decryption
 * the trailing header copy is compared against the outer header. */
1255 if (token_flags & CFXSealed) {
1256 ret = krb5_decrypt(context, ctx->crypto, usage,
1257 p, len, &data);
1258 if (ret != 0) {
1259 *minor_status = ret;
1260 return GSS_S_BAD_MIC;
1263 /* Check that there is room for the pad and token header */
1264 if (data.length < ec + sizeof(*token)) {
1265 krb5_data_free(&data);
1266 return GSS_S_DEFECTIVE_TOKEN;
1268 p = data.data;
1269 p += data.length - sizeof(*token);
1271 /* RRC is unprotected; don't modify input buffer */
1272 ((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
1273 ((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];
1275 /* Check the integrity of the header */
1276 if (memcmp(p, token, sizeof(*token)) != 0) {
1277 krb5_data_free(&data);
1278 return GSS_S_BAD_MIC;
/* Output takes ownership of the decrypted krb5_data buffer */
1281 output_message_buffer->value = data.data;
1282 output_message_buffer->length = data.length - ec - sizeof(*token);
1283 } else {
1284 Checksum cksum;
1286 /* Determine checksum type */
1287 ret = krb5_crypto_get_checksum_type(context,
1288 ctx->crypto,
1289 &cksum.cksumtype);
1290 if (ret != 0) {
1291 *minor_status = ret;
1292 return GSS_S_FAILURE;
/* For unsealed tokens the EC field carries the checksum length */
1295 cksum.checksum.length = ec;
1297 /* Check we have at least as much data as the checksum */
1298 if (len < cksum.checksum.length) {
1299 *minor_status = ERANGE;
1300 return GSS_S_BAD_MIC;
1303 /* Length now is of the plaintext only, no checksum */
1304 len -= cksum.checksum.length;
1305 cksum.checksum.data = p + len;
1307 output_message_buffer->length = len; /* for later */
1308 output_message_buffer->value = malloc(len + sizeof(*token));
1309 if (output_message_buffer->value == NULL) {
1310 *minor_status = ENOMEM;
1311 return GSS_S_FAILURE;
1314 /* Checksum is over (plaintext-data | "header") */
1315 memcpy(output_message_buffer->value, p, len);
1316 memcpy((u_char *)output_message_buffer->value + len,
1317 token, sizeof(*token));
1319 /* EC is not included in checksum calculation */
1320 token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
1321 len);
1322 token->EC[0] = 0;
1323 token->EC[1] = 0;
1324 token->RRC[0] = 0;
1325 token->RRC[1] = 0;
1327 ret = krb5_verify_checksum(context, ctx->crypto,
1328 usage,
1329 output_message_buffer->value,
1330 len + sizeof(*token),
1331 &cksum);
1332 if (ret != 0) {
1333 *minor_status = ret;
1334 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1335 return GSS_S_BAD_MIC;
1339 if (qop_state != NULL) {
1340 *qop_state = GSS_C_QOP_DEFAULT;
1343 *minor_status = 0;
1344 return GSS_S_COMPLETE;
1347 OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
1348 const gsskrb5_ctx ctx,
1349 krb5_context context,
1350 gss_qop_t qop_req,
1351 const gss_buffer_t message_buffer,
1352 gss_buffer_t message_token)
1354 gss_cfx_mic_token token;
1355 krb5_error_code ret;
1356 unsigned usage;
1357 Checksum cksum;
1358 u_char *buf;
1359 size_t len;
1360 int32_t seq_number;
1362 len = message_buffer->length + sizeof(*token);
1363 buf = malloc(len);
1364 if (buf == NULL) {
1365 *minor_status = ENOMEM;
1366 return GSS_S_FAILURE;
1369 memcpy(buf, message_buffer->value, message_buffer->length);
1371 token = (gss_cfx_mic_token)(buf + message_buffer->length);
1372 token->TOK_ID[0] = 0x04;
1373 token->TOK_ID[1] = 0x04;
1374 token->Flags = 0;
1375 if ((ctx->more_flags & LOCAL) == 0)
1376 token->Flags |= CFXSentByAcceptor;
1377 if (ctx->more_flags & ACCEPTOR_SUBKEY)
1378 token->Flags |= CFXAcceptorSubkey;
1379 memset(token->Filler, 0xFF, 5);
1381 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1382 krb5_auth_con_getlocalseqnumber(context,
1383 ctx->auth_context,
1384 &seq_number);
1385 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
1386 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1387 krb5_auth_con_setlocalseqnumber(context,
1388 ctx->auth_context,
1389 ++seq_number);
1390 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1392 if (ctx->more_flags & LOCAL) {
1393 usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1394 } else {
1395 usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1398 ret = krb5_create_checksum(context, ctx->crypto,
1399 usage, 0, buf, len, &cksum);
1400 if (ret != 0) {
1401 *minor_status = ret;
1402 free(buf);
1403 return GSS_S_FAILURE;
1406 /* Determine MIC length */
1407 message_token->length = sizeof(*token) + cksum.checksum.length;
1408 message_token->value = malloc(message_token->length);
1409 if (message_token->value == NULL) {
1410 *minor_status = ENOMEM;
1411 free_Checksum(&cksum);
1412 free(buf);
1413 return GSS_S_FAILURE;
1416 /* Token is { "header" | get_mic("header" | plaintext-data) } */
1417 memcpy(message_token->value, token, sizeof(*token));
1418 memcpy((u_char *)message_token->value + sizeof(*token),
1419 cksum.checksum.data, cksum.checksum.length);
1421 free_Checksum(&cksum);
1422 free(buf);
1424 *minor_status = 0;
1425 return GSS_S_COMPLETE;
/*
 * _gssapi_verify_mic_cfx: verify an RFC 4121 MIC token against
 * message_buffer.
 *
 * Validates the token header (TOK_ID 04 04, flags, 0xFF filler),
 * enforces the message-order/sequence window, then verifies the keyed
 * checksum computed over (plaintext-data | "header").  Returns
 * GSS_S_BAD_MIC when the checksum does not match.
 */
1428 OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
1429 const gsskrb5_ctx ctx,
1430 krb5_context context,
1431 const gss_buffer_t message_buffer,
1432 const gss_buffer_t token_buffer,
1433 gss_qop_t *qop_state)
1435 gss_cfx_mic_token token;
1436 u_char token_flags;
1437 krb5_error_code ret;
1438 unsigned usage;
1439 OM_uint32 seq_number_lo, seq_number_hi;
1440 u_char *buf, *p;
1441 Checksum cksum;
1443 *minor_status = 0;
/* The token must at least hold the fixed-size MIC header */
1445 if (token_buffer->length < sizeof(*token)) {
1446 return GSS_S_DEFECTIVE_TOKEN;
1449 p = token_buffer->value;
1451 token = (gss_cfx_mic_token)p;
1453 if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
1454 return GSS_S_DEFECTIVE_TOKEN;
1457 /* Ignore unknown flags */
1458 token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);
/* A token marked sent-by-acceptor may only arrive at the initiator */
1460 if (token_flags & CFXSentByAcceptor) {
1461 if ((ctx->more_flags & LOCAL) == 0)
1462 return GSS_S_DEFECTIVE_TOKEN;
/* The token's subkey flag must agree with the context's subkey state */
1464 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1465 if ((token_flags & CFXAcceptorSubkey) == 0)
1466 return GSS_S_DEFECTIVE_TOKEN;
1467 } else {
1468 if (token_flags & CFXAcceptorSubkey)
1469 return GSS_S_DEFECTIVE_TOKEN;
1472 if (memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
1473 return GSS_S_DEFECTIVE_TOKEN;
1477 * Check sequence number
1479 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1480 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
/* Upper 32 bits must be zero: no 64-bit sequence number support */
1481 if (seq_number_hi) {
1482 *minor_status = ERANGE;
1483 return GSS_S_UNSEQ_TOKEN;
/* Replay/order check must run under the context lock */
1486 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1487 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1488 if (ret != 0) {
1489 *minor_status = 0;
1490 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1491 return ret;
1493 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1496 * Verify checksum
1498 ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
1499 &cksum.cksumtype);
1500 if (ret != 0) {
1501 *minor_status = ret;
1502 return GSS_S_FAILURE;
/* Checksum bytes follow the header; its length is whatever remains of
 * the token (krb5_verify_checksum is expected to reject a wrong size) */
1505 cksum.checksum.data = p + sizeof(*token);
1506 cksum.checksum.length = token_buffer->length - sizeof(*token);
1508 if (ctx->more_flags & LOCAL) {
1509 usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1510 } else {
1511 usage = KRB5_KU_USAGE_INITIATOR_SIGN;
/* Reconstruct the signed input: (plaintext-data | "header") */
1514 buf = malloc(message_buffer->length + sizeof(*token));
1515 if (buf == NULL) {
1516 *minor_status = ENOMEM;
1517 return GSS_S_FAILURE;
1519 memcpy(buf, message_buffer->value, message_buffer->length);
1520 memcpy(buf + message_buffer->length, token, sizeof(*token));
1522 ret = krb5_verify_checksum(context, ctx->crypto,
1523 usage,
1524 buf,
1525 sizeof(*token) + message_buffer->length,
1526 &cksum);
1527 if (ret != 0) {
1528 *minor_status = ret;
1529 free(buf);
1530 return GSS_S_BAD_MIC;
1533 free(buf);
1535 if (qop_state != NULL) {
1536 *qop_state = GSS_C_QOP_DEFAULT;
1539 return GSS_S_COMPLETE;