Round #2 of scan-build warnings cleanup
[heimdal.git] / lib / gssapi / krb5 / cfx.c
blob29fecca861ce6fc50ca4674d741b5bd5538206b2
1 /*
2 * Copyright (c) 2003, PADL Software Pty Ltd.
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of PADL Software nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
33 #include "gsskrb5_locl.h"
36 * Implementation of RFC 4121
39 #define CFXSentByAcceptor (1 << 0)
40 #define CFXSealed (1 << 1)
41 #define CFXAcceptorSubkey (1 << 2)
/*
 * Compute the total RFC 4121 wrap-token size for a plaintext of
 * input_length bytes: *output_length receives the complete token
 * length, *cksumsize the checksum size for the negotiated checksum
 * type, and *padlength the number of pad bytes the caller must add.
 * Returns 0 on success or a krb5 error code.
 */
43 krb5_error_code
44 _gsskrb5cfx_wrap_length_cfx(krb5_context context,
45 krb5_crypto crypto,
46 int conf_req_flag,
47 int dce_style,
48 size_t input_length,
49 size_t *output_length,
50 size_t *cksumsize,
51 uint16_t *padlength)
53 krb5_error_code ret;
54 krb5_cksumtype type;
56 /* 16-byte header is always first */
57 *output_length = sizeof(gss_cfx_wrap_token_desc);
58 *padlength = 0;
60 ret = krb5_crypto_get_checksum_type(context, crypto, &type);
61 if (ret)
62 return ret;
64 ret = krb5_checksumsize(context, type, cksumsize);
65 if (ret)
66 return ret;
68 if (conf_req_flag) {
69 size_t padsize;
71 /* Header is concatenated with data before encryption */
72 input_length += sizeof(gss_cfx_wrap_token_desc);
/* DCE style rounds up to the cipher block size; plain CFX uses the enctype pad size */
74 if (dce_style) {
75 ret = krb5_crypto_getblocksize(context, crypto, &padsize);
76 } else {
77 ret = krb5_crypto_getpadsize(context, crypto, &padsize);
79 if (ret) {
80 return ret;
82 if (padsize > 1) {
83 /* XXX check this */
84 *padlength = padsize - (input_length % padsize);
86 /* We add the pad ourselves (noted here for completeness only) */
87 input_length += *padlength;
/* encryption overhead (confounder, checksum) is added by krb5_get_wrapped_length */
90 *output_length += krb5_get_wrapped_length(context,
91 crypto, input_length);
92 } else {
93 /* Checksum is concatenated with data */
94 *output_length += input_length + *cksumsize;
97 assert(*output_length > input_length);
99 return 0;
102 OM_uint32
103 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
104 const gsskrb5_ctx ctx,
105 krb5_context context,
106 int conf_req_flag,
107 gss_qop_t qop_req,
108 OM_uint32 req_output_size,
109 OM_uint32 *max_input_size)
111 krb5_error_code ret;
113 *max_input_size = 0;
115 /* 16-byte header is always first */
116 if (req_output_size < 16)
117 return 0;
118 req_output_size -= 16;
120 if (conf_req_flag) {
121 size_t wrapped_size, sz;
123 wrapped_size = req_output_size + 1;
124 do {
125 wrapped_size--;
126 sz = krb5_get_wrapped_length(context,
127 ctx->crypto, wrapped_size);
128 } while (wrapped_size && sz > req_output_size);
129 if (wrapped_size == 0)
130 return 0;
132 /* inner header */
133 if (wrapped_size < 16)
134 return 0;
136 wrapped_size -= 16;
138 *max_input_size = wrapped_size;
139 } else {
140 krb5_cksumtype type;
141 size_t cksumsize;
143 ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
144 if (ret)
145 return ret;
147 ret = krb5_checksumsize(context, type, &cksumsize);
148 if (ret)
149 return ret;
151 if (req_output_size < cksumsize)
152 return 0;
154 /* Checksum is concatenated with data */
155 *max_input_size = req_output_size - cksumsize;
158 return 0;
162 * Rotate "rrc" bytes to the front or back
165 static krb5_error_code
166 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
168 u_char *tmp, buf[256];
169 size_t left;
171 if (len == 0)
172 return 0;
174 rrc %= len;
176 if (rrc == 0)
177 return 0;
179 left = len - rrc;
181 if (rrc <= sizeof(buf)) {
182 tmp = buf;
183 } else {
184 tmp = malloc(rrc);
185 if (tmp == NULL)
186 return ENOMEM;
189 if (unrotate) {
190 memcpy(tmp, data, rrc);
191 memmove(data, (u_char *)data + rrc, left);
192 memcpy((u_char *)data + left, tmp, rrc);
193 } else {
194 memcpy(tmp, (u_char *)data + left, rrc);
195 memmove((u_char *)data + rrc, data, left);
196 memcpy(data, tmp, rrc);
199 if (rrc > sizeof(buf))
200 free(tmp);
202 return 0;
205 gss_iov_buffer_desc *
206 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
208 int i;
210 for (i = 0; i < iov_count; i++)
211 if (type == GSS_IOV_BUFFER_TYPE(iov[i].type))
212 return &iov[i];
213 return NULL;
216 OM_uint32
217 _gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
219 if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
220 if (buffer->buffer.length == size)
221 return GSS_S_COMPLETE;
222 free(buffer->buffer.value);
225 buffer->buffer.value = malloc(size);
226 buffer->buffer.length = size;
227 if (buffer->buffer.value == NULL) {
228 *minor_status = ENOMEM;
229 return GSS_S_FAILURE;
231 buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
233 return GSS_S_COMPLETE;
237 OM_uint32
238 _gk_verify_buffers(OM_uint32 *minor_status,
239 const gsskrb5_ctx ctx,
240 const gss_iov_buffer_desc *header,
241 const gss_iov_buffer_desc *padding,
242 const gss_iov_buffer_desc *trailer)
244 if (header == NULL) {
245 *minor_status = EINVAL;
246 return GSS_S_FAILURE;
249 if (IS_DCE_STYLE(ctx)) {
251 * In DCE style mode we reject having a padding or trailer buffer
253 if (padding) {
254 *minor_status = EINVAL;
255 return GSS_S_FAILURE;
257 if (trailer) {
258 *minor_status = EINVAL;
259 return GSS_S_FAILURE;
261 } else {
263 * In non-DCE style mode we require having a padding buffer
265 if (padding == NULL) {
266 *minor_status = EINVAL;
267 return GSS_S_FAILURE;
271 *minor_status = 0;
272 return GSS_S_COMPLETE;
/*
 * IOV variant of RFC 4121 Wrap.  Builds the CFX token header in the
 * HEADER buffer, then either encrypts (conf_req_flag) or checksums
 * the payload in place using the krb5 IOV crypto API.  With no
 * TRAILER buffer (DCE mode), the trailing parts are packed after the
 * header and RRC is set accordingly.  On failure, any buffers this
 * call allocated are released via gss_release_iov_buffer().
 */
275 OM_uint32
276 _gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
277 gsskrb5_ctx ctx,
278 krb5_context context,
279 int conf_req_flag,
280 int *conf_state,
281 gss_iov_buffer_desc *iov,
282 int iov_count)
284 OM_uint32 major_status, junk;
285 gss_iov_buffer_desc *header, *trailer, *padding;
286 size_t gsshsize, k5hsize;
287 size_t gsstsize, k5tsize;
288 size_t rrc = 0, ec = 0;
289 int i;
290 gss_cfx_wrap_token token;
291 krb5_error_code ret;
292 int32_t seq_number;
293 unsigned usage;
294 krb5_crypto_iov *data = NULL;
296 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
297 if (header == NULL) {
298 *minor_status = EINVAL;
299 return GSS_S_FAILURE;
302 padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
303 if (padding != NULL) {
304 padding->buffer.length = 0;
307 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
309 major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
310 if (major_status != GSS_S_COMPLETE) {
311 return major_status;
/* Size the GSS header/trailer: sealed path needs krb5 header/trailer/pad lengths */
314 if (conf_req_flag) {
315 size_t k5psize = 0;
316 size_t k5pbase = 0;
317 size_t k5bsize = 0;
318 size_t size = 0;
320 for (i = 0; i < iov_count; i++) {
321 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
322 case GSS_IOV_BUFFER_TYPE_DATA:
323 size += iov[i].buffer.length;
324 break;
325 default:
326 break;
330 size += sizeof(gss_cfx_wrap_token_desc);
332 *minor_status = krb5_crypto_length(context, ctx->crypto,
333 KRB5_CRYPTO_TYPE_HEADER,
334 &k5hsize);
335 if (*minor_status)
336 return GSS_S_FAILURE;
338 *minor_status = krb5_crypto_length(context, ctx->crypto,
339 KRB5_CRYPTO_TYPE_TRAILER,
340 &k5tsize);
341 if (*minor_status)
342 return GSS_S_FAILURE;
344 *minor_status = krb5_crypto_length(context, ctx->crypto,
345 KRB5_CRYPTO_TYPE_PADDING,
346 &k5pbase);
347 if (*minor_status)
348 return GSS_S_FAILURE;
350 if (k5pbase > 1) {
351 k5psize = k5pbase - (size % k5pbase);
352 } else {
353 k5psize = 0;
/* EC (extra count) = pad bytes; DCE pads to a full cipher block when none needed */
356 if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
357 *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
358 &k5bsize);
359 if (*minor_status)
360 return GSS_S_FAILURE;
361 ec = k5bsize;
362 } else {
363 ec = k5psize;
366 gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
367 gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
368 } else {
369 if (IS_DCE_STYLE(ctx)) {
370 *minor_status = EINVAL;
371 return GSS_S_FAILURE;
374 k5hsize = 0;
375 *minor_status = krb5_crypto_length(context, ctx->crypto,
376 KRB5_CRYPTO_TYPE_CHECKSUM,
377 &k5tsize);
378 if (*minor_status)
379 return GSS_S_FAILURE;
381 gsshsize = sizeof(gss_cfx_wrap_token_desc);
382 gsstsize = k5tsize;
/* No trailer buffer: fold the trailer into the header and rotate by RRC */
389 if (trailer == NULL) {
390 rrc = gsstsize;
391 if (IS_DCE_STYLE(ctx))
392 rrc -= ec;
393 gsshsize += gsstsize;
394 } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
395 major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
396 if (major_status)
397 goto failure;
398 } else if (trailer->buffer.length < gsstsize) {
399 *minor_status = KRB5_BAD_MSIZE;
400 major_status = GSS_S_FAILURE;
401 goto failure;
402 } else
403 trailer->buffer.length = gsstsize;
409 if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
410 major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
411 if (major_status != GSS_S_COMPLETE)
412 goto failure;
413 } else if (header->buffer.length < gsshsize) {
414 *minor_status = KRB5_BAD_MSIZE;
415 major_status = GSS_S_FAILURE;
416 goto failure;
417 } else
418 header->buffer.length = gsshsize;
/* Fill in the 16-byte RFC 4121 token header (TOK_ID 05 04) */
420 token = (gss_cfx_wrap_token)header->buffer.value;
422 token->TOK_ID[0] = 0x05;
423 token->TOK_ID[1] = 0x04;
424 token->Flags = 0;
425 token->Filler = 0xFF;
427 if ((ctx->more_flags & LOCAL) == 0)
428 token->Flags |= CFXSentByAcceptor;
430 if (ctx->more_flags & ACCEPTOR_SUBKEY)
431 token->Flags |= CFXAcceptorSubkey;
433 if (ctx->more_flags & LOCAL)
434 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
435 else
436 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
438 if (conf_req_flag) {
440 * In Wrap tokens with confidentiality, the EC field is
441 * used to encode the size (in bytes) of the random filler.
443 token->Flags |= CFXSealed;
444 token->EC[0] = (ec >> 8) & 0xFF;
445 token->EC[1] = (ec >> 0) & 0xFF;
447 } else {
449 * In Wrap tokens without confidentiality, the EC field is
450 * used to encode the size (in bytes) of the trailing
451 * checksum.
453 * This is not used in the checksum calcuation itself,
454 * because the checksum length could potentially vary
455 * depending on the data length.
457 token->EC[0] = 0;
458 token->EC[1] = 0;
462 * In Wrap tokens that provide for confidentiality, the RRC
463 * field in the header contains the hex value 00 00 before
464 * encryption.
466 * In Wrap tokens that do not provide for confidentiality,
467 * both the EC and RRC fields in the appended checksum
468 * contain the hex value 00 00 for the purpose of calculating
469 * the checksum.
471 token->RRC[0] = 0;
472 token->RRC[1] = 0;
/* Sequence number: 32-bit, stored big-endian in the upper-zero 64-bit field */
474 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
475 krb5_auth_con_getlocalseqnumber(context,
476 ctx->auth_context,
477 &seq_number);
478 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
479 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
480 krb5_auth_con_setlocalseqnumber(context,
481 ctx->auth_context,
482 ++seq_number);
483 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/* iov_count user buffers + krb5 header + E"header" + krb5 trailer */
485 data = calloc(iov_count + 3, sizeof(data[0]));
486 if (data == NULL) {
487 *minor_status = ENOMEM;
488 major_status = GSS_S_FAILURE;
489 goto failure;
492 if (conf_req_flag) {
494 plain packet:
496 {"header" | encrypt(plaintext-data | ec-padding | E"header")}
498 Expanded, this is with with RRC = 0:
500 {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }
502 In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)
504 {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data }
507 i = 0;
508 data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
509 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
510 data[i].data.length = k5hsize;
512 for (i = 1; i < iov_count + 1; i++) {
513 switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
514 case GSS_IOV_BUFFER_TYPE_DATA:
515 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
516 break;
517 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
518 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
519 break;
520 default:
521 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
522 break;
524 data[i].data.length = iov[i - 1].buffer.length;
525 data[i].data.data = iov[i - 1].buffer.value;
529 * Any necessary padding is added here to ensure that the
530 * encrypted token header is always at the end of the
531 * ciphertext.
534 /* encrypted CFX header in trailer (or after the header if in
535 DCE mode). Copy in header into E"header"
537 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
538 if (trailer)
539 data[i].data.data = trailer->buffer.value;
540 else
541 data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);
543 data[i].data.length = ec + sizeof(*token);
544 memset(data[i].data.data, 0xFF, ec);
545 memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
546 i++;
548 /* Kerberos trailer comes after the gss trailer */
549 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
550 data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
551 data[i].data.length = k5tsize;
552 i++;
554 ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
555 if (ret != 0) {
556 *minor_status = ret;
557 major_status = GSS_S_FAILURE;
558 goto failure;
/* RRC is patched into the (unencrypted) outer header after encryption */
561 if (rrc) {
562 token->RRC[0] = (rrc >> 8) & 0xFF;
563 token->RRC[1] = (rrc >> 0) & 0xFF;
566 } else {
568 plain packet:
570 {data | "header" | gss-trailer (krb5 checksum)
572 don't do RRC != 0
576 for (i = 0; i < iov_count; i++) {
577 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
578 case GSS_IOV_BUFFER_TYPE_DATA:
579 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
580 break;
581 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
582 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
583 break;
584 default:
585 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
586 break;
588 data[i].data.length = iov[i].buffer.length;
589 data[i].data.data = iov[i].buffer.value;
592 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
593 data[i].data.data = header->buffer.value;
594 data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
595 i++;
597 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
598 if (trailer) {
599 data[i].data.data = trailer->buffer.value;
600 } else {
601 data[i].data.data = (uint8_t *)header->buffer.value +
602 sizeof(gss_cfx_wrap_token_desc);
604 data[i].data.length = k5tsize;
605 i++;
607 ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
608 if (ret) {
609 *minor_status = ret;
610 major_status = GSS_S_FAILURE;
611 goto failure;
614 if (rrc) {
615 token->RRC[0] = (rrc >> 8) & 0xFF;
616 token->RRC[1] = (rrc >> 0) & 0xFF;
/* Unsealed tokens carry the checksum length in EC (RFC 4121 4.2.6.2) */
619 token->EC[0] = (k5tsize >> 8) & 0xFF;
620 token->EC[1] = (k5tsize >> 0) & 0xFF;
623 if (conf_state != NULL)
624 *conf_state = conf_req_flag;
626 free(data);
628 *minor_status = 0;
629 return GSS_S_COMPLETE;
631 failure:
632 if (data)
633 free(data);
635 gss_release_iov_buffer(&junk, iov, iov_count);
637 return major_status;
/*
 * Slow-path RRC removal for scattered IOV buffers: flatten the
 * DATA/PADDING/TRAILER buffers into one heap buffer, then copy the
 * bytes back rotated left by rrc (last rrc bytes end up first across
 * the buffer sequence).  Returns GSS_S_COMPLETE or GSS_S_FAILURE
 * (*minor_status = ENOMEM).
 */
640 /* This is slowpath */
641 static OM_uint32
642 unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
644 uint8_t *p, *q;
645 size_t len = 0, skip;
646 int i;
/* pass 1: total length of the rotatable buffers */
648 for (i = 0; i < iov_count; i++)
649 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
650 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
651 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
652 len += iov[i].buffer.length;
654 p = malloc(len);
655 if (p == NULL) {
656 *minor_status = ENOMEM;
657 return GSS_S_FAILURE;
659 q = p;
661 /* copy up */
/* pass 2: flatten all rotatable buffers into the scratch area */
663 for (i = 0; i < iov_count; i++) {
664 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
665 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
666 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
668 memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
669 q += iov[i].buffer.length;
672 assert((size_t)(q - p) == len);
674 /* unrotate first part */
/* pass 3: scatter bytes [rrc..len) back, skipping the first rrc output bytes */
675 q = p + rrc;
676 skip = rrc;
677 for (i = 0; i < iov_count; i++) {
678 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
679 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
680 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
682 if (iov[i].buffer.length <= skip) {
683 skip -= iov[i].buffer.length;
684 } else {
685 /* copy back to original buffer */
686 memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
687 q += iov[i].buffer.length - skip;
688 skip = 0;
692 /* copy trailer */
/* pass 4: the first rrc scratch bytes fill the first rrc output positions */
693 q = p;
694 skip = rrc;
695 for (i = 0; i < iov_count; i++) {
696 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
697 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
698 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
700 memcpy(iov[i].buffer.value, q, min(iov[i].buffer.length, skip));
701 if (iov[i].buffer.length > skip)
702 break;
703 skip -= iov[i].buffer.length;
704 q += iov[i].buffer.length;
707 free(p);
708 return GSS_S_COMPLETE;
/*
 * IOV variant of RFC 4121 Unwrap.  Validates the CFX token header
 * (TOK_ID, flags, filler, sequence number), then either decrypts the
 * payload and compares the decrypted inner header against the outer
 * one, or verifies the trailing checksum with EC/RRC zeroed as the
 * spec requires.  On failure, buffers allocated on behalf of the
 * caller are released via gss_release_iov_buffer().
 */
712 OM_uint32
713 _gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
714 gsskrb5_ctx ctx,
715 krb5_context context,
716 int *conf_state,
717 gss_qop_t *qop_state,
718 gss_iov_buffer_desc *iov,
719 int iov_count)
721 OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
722 gss_iov_buffer_desc *header, *trailer, *padding;
723 gss_cfx_wrap_token token, ttoken;
724 u_char token_flags;
725 krb5_error_code ret;
726 unsigned usage;
727 uint16_t ec, rrc;
728 krb5_crypto_iov *data = NULL;
729 int i, j;
731 *minor_status = 0;
733 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
734 if (header == NULL) {
735 *minor_status = EINVAL;
736 return GSS_S_FAILURE;
739 if (header->buffer.length < sizeof(*token)) /* we check exact below */
740 return GSS_S_DEFECTIVE_TOKEN;
742 padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
743 if (padding != NULL && padding->buffer.length != 0) {
744 *minor_status = EINVAL;
745 return GSS_S_FAILURE;
748 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
750 major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
751 if (major_status != GSS_S_COMPLETE) {
752 return major_status;
755 token = (gss_cfx_wrap_token)header->buffer.value;
757 if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
758 return GSS_S_DEFECTIVE_TOKEN;
760 /* Ignore unknown flags */
761 token_flags = token->Flags &
762 (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
/* Direction check: a token flagged as acceptor-sent must arrive at the initiator */
764 if (token_flags & CFXSentByAcceptor) {
765 if ((ctx->more_flags & LOCAL) == 0)
766 return GSS_S_DEFECTIVE_TOKEN;
769 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
770 if ((token_flags & CFXAcceptorSubkey) == 0)
771 return GSS_S_DEFECTIVE_TOKEN;
772 } else {
773 if (token_flags & CFXAcceptorSubkey)
774 return GSS_S_DEFECTIVE_TOKEN;
777 if (token->Filler != 0xFF)
778 return GSS_S_DEFECTIVE_TOKEN;
780 if (conf_state != NULL)
781 *conf_state = (token_flags & CFXSealed) ? 1 : 0;
783 ec = (token->EC[0] << 8) | token->EC[1];
784 rrc = (token->RRC[0] << 8) | token->RRC[1];
787 * Check sequence number
789 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
790 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
791 if (seq_number_hi) {
792 /* no support for 64-bit sequence numbers */
793 *minor_status = ERANGE;
794 return GSS_S_UNSEQ_TOKEN;
797 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
798 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
799 if (ret != 0) {
800 *minor_status = 0;
801 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
802 return ret;
804 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
807 * Decrypt and/or verify checksum
/* Receive direction: usages are mirrored relative to the wrap side */
810 if (ctx->more_flags & LOCAL) {
811 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
812 } else {
813 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
816 data = calloc(iov_count + 3, sizeof(data[0]));
817 if (data == NULL) {
818 *minor_status = ENOMEM;
819 major_status = GSS_S_FAILURE;
820 goto failure;
823 if (token_flags & CFXSealed) {
824 size_t k5tsize, k5hsize;
/* NOTE(review): scan-build class issue - krb5_crypto_length() return values
 * are ignored here; on failure k5hsize/k5tsize would be used uninitialized.
 * TODO confirm whether these calls can fail for an established crypto ctx. */
826 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
827 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
829 /* Rotate by RRC; bogus to do this in-place XXX */
830 /* Check RRC */
832 if (trailer == NULL) {
833 size_t gsstsize = k5tsize + sizeof(*token);
834 size_t gsshsize = k5hsize + sizeof(*token);
836 if (rrc != gsstsize) {
837 major_status = GSS_S_DEFECTIVE_TOKEN;
838 goto failure;
841 if (IS_DCE_STYLE(ctx))
842 gsstsize += ec;
844 gsshsize += gsstsize;
846 if (header->buffer.length != gsshsize) {
847 major_status = GSS_S_DEFECTIVE_TOKEN;
848 goto failure;
850 } else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
851 major_status = GSS_S_DEFECTIVE_TOKEN;
852 goto failure;
853 } else if (header->buffer.length != sizeof(*token) + k5hsize) {
854 major_status = GSS_S_DEFECTIVE_TOKEN;
855 goto failure;
856 } else if (rrc != 0) {
857 /* go though slowpath */
858 major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
859 if (major_status)
860 goto failure;
863 i = 0;
864 data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
865 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
866 data[i].data.length = k5hsize;
867 i++;
869 for (j = 0; j < iov_count; i++, j++) {
870 switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
871 case GSS_IOV_BUFFER_TYPE_DATA:
872 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
873 break;
874 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
875 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
876 break;
877 default:
878 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
879 break;
881 data[i].data.length = iov[j].buffer.length;
882 data[i].data.data = iov[j].buffer.value;
885 /* encrypted CFX header in trailer (or after the header if in
886 DCE mode). Copy in header into E"header"
888 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
889 if (trailer) {
890 data[i].data.data = trailer->buffer.value;
891 } else {
892 data[i].data.data = ((uint8_t *)header->buffer.value) +
893 header->buffer.length - k5hsize - k5tsize - ec- sizeof(*token);
896 data[i].data.length = ec + sizeof(*token);
897 ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
898 i++;
900 /* Kerberos trailer comes after the gss trailer */
901 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
902 data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
903 data[i].data.length = k5tsize;
904 i++;
906 ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
907 if (ret != 0) {
908 *minor_status = ret;
909 major_status = GSS_S_FAILURE;
910 goto failure;
/* The inner header was encrypted with RRC = 0; copy the outer RRC
   over before the constant-time compare */
913 ttoken->RRC[0] = token->RRC[0];
914 ttoken->RRC[1] = token->RRC[1];
916 /* Check the integrity of the header */
917 if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
918 major_status = GSS_S_BAD_MIC;
919 goto failure;
921 } else {
922 size_t gsstsize = ec;
923 size_t gsshsize = sizeof(*token);
925 if (trailer == NULL) {
926 /* Check RRC */
927 if (rrc != gsstsize) {
928 *minor_status = EINVAL;
929 major_status = GSS_S_FAILURE;
930 goto failure;
933 gsshsize += gsstsize;
934 } else if (trailer->buffer.length != gsstsize) {
935 major_status = GSS_S_DEFECTIVE_TOKEN;
936 goto failure;
937 } else if (rrc != 0) {
938 /* Check RRC */
939 *minor_status = EINVAL;
940 major_status = GSS_S_FAILURE;
941 goto failure;
944 if (header->buffer.length != gsshsize) {
945 major_status = GSS_S_DEFECTIVE_TOKEN;
946 goto failure;
949 for (i = 0; i < iov_count; i++) {
950 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
951 case GSS_IOV_BUFFER_TYPE_DATA:
952 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
953 break;
954 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
955 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
956 break;
957 default:
958 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
959 break;
961 data[i].data.length = iov[i].buffer.length;
962 data[i].data.data = iov[i].buffer.value;
965 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
966 data[i].data.data = header->buffer.value;
967 data[i].data.length = sizeof(*token);
968 i++;
970 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
971 if (trailer) {
972 data[i].data.data = trailer->buffer.value;
973 } else {
974 data[i].data.data = (uint8_t *)header->buffer.value +
975 sizeof(*token);
977 data[i].data.length = ec;
978 i++;
/* EC/RRC must be zero while the checksum is verified (RFC 4121 4.2.4) */
980 token = (gss_cfx_wrap_token)header->buffer.value;
981 token->EC[0] = 0;
982 token->EC[1] = 0;
983 token->RRC[0] = 0;
984 token->RRC[1] = 0;
986 ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
987 if (ret) {
988 *minor_status = ret;
989 major_status = GSS_S_FAILURE;
990 goto failure;
994 if (qop_state != NULL) {
995 *qop_state = GSS_C_QOP_DEFAULT;
998 free(data);
1000 *minor_status = 0;
1001 return GSS_S_COMPLETE;
1003 failure:
1004 if (data)
1005 free(data);
1007 gss_release_iov_buffer(&junk, iov, iov_count);
1009 return major_status;
/*
 * IOV variant of wrap_size: computes the HEADER/TRAILER/PADDING buffer
 * lengths the caller must provide for _gssapi_wrap_cfx_iov() with the
 * given flags, without doing any cryptography on the data.  Mirrors
 * the sizing arithmetic in _gssapi_wrap_cfx_iov(); padding is always
 * 0 since pad bytes live in EC inside the header or trailer.
 */
1012 OM_uint32
1013 _gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
1014 gsskrb5_ctx ctx,
1015 krb5_context context,
1016 int conf_req_flag,
1017 gss_qop_t qop_req,
1018 int *conf_state,
1019 gss_iov_buffer_desc *iov,
1020 int iov_count)
1022 OM_uint32 major_status;
1023 size_t size;
1024 int i;
1025 gss_iov_buffer_desc *header = NULL;
1026 gss_iov_buffer_desc *padding = NULL;
1027 gss_iov_buffer_desc *trailer = NULL;
1028 size_t gsshsize = 0;
1029 size_t gsstsize = 0;
1030 size_t k5hsize = 0;
1031 size_t k5tsize = 0;
1033 GSSAPI_KRB5_INIT (&context);
1034 *minor_status = 0;
/* Single pass: total the DATA bytes and reject duplicate special buffers */
1036 for (size = 0, i = 0; i < iov_count; i++) {
1037 switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
1038 case GSS_IOV_BUFFER_TYPE_EMPTY:
1039 break;
1040 case GSS_IOV_BUFFER_TYPE_DATA:
1041 size += iov[i].buffer.length;
1042 break;
1043 case GSS_IOV_BUFFER_TYPE_HEADER:
1044 if (header != NULL) {
1045 *minor_status = 0;
1046 return GSS_S_FAILURE;
1048 header = &iov[i];
1049 break;
1050 case GSS_IOV_BUFFER_TYPE_TRAILER:
1051 if (trailer != NULL) {
1052 *minor_status = 0;
1053 return GSS_S_FAILURE;
1055 trailer = &iov[i];
1056 break;
1057 case GSS_IOV_BUFFER_TYPE_PADDING:
1058 if (padding != NULL) {
1059 *minor_status = 0;
1060 return GSS_S_FAILURE;
1062 padding = &iov[i];
1063 break;
1064 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
1065 break;
1066 default:
1067 *minor_status = EINVAL;
1068 return GSS_S_FAILURE;
1072 major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
1073 if (major_status != GSS_S_COMPLETE) {
1074 return major_status;
1077 if (conf_req_flag) {
1078 size_t k5psize = 0;
1079 size_t k5pbase = 0;
1080 size_t k5bsize = 0;
1081 size_t ec = 0;
1083 size += sizeof(gss_cfx_wrap_token_desc);
1085 *minor_status = krb5_crypto_length(context, ctx->crypto,
1086 KRB5_CRYPTO_TYPE_HEADER,
1087 &k5hsize);
1088 if (*minor_status)
1089 return GSS_S_FAILURE;
1091 *minor_status = krb5_crypto_length(context, ctx->crypto,
1092 KRB5_CRYPTO_TYPE_TRAILER,
1093 &k5tsize);
1094 if (*minor_status)
1095 return GSS_S_FAILURE;
1097 *minor_status = krb5_crypto_length(context, ctx->crypto,
1098 KRB5_CRYPTO_TYPE_PADDING,
1099 &k5pbase);
1100 if (*minor_status)
1101 return GSS_S_FAILURE;
1103 if (k5pbase > 1) {
1104 k5psize = k5pbase - (size % k5pbase);
1105 } else {
1106 k5psize = 0;
/* EC mirrors _gssapi_wrap_cfx_iov: DCE pads to a full block when no pad needed */
1109 if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
1110 *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
1111 &k5bsize);
1112 if (*minor_status)
1113 return GSS_S_FAILURE;
1115 ec = k5bsize;
1116 } else {
1117 ec = k5psize;
1120 gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
1121 gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
1122 } else {
1123 *minor_status = krb5_crypto_length(context, ctx->crypto,
1124 KRB5_CRYPTO_TYPE_CHECKSUM,
1125 &k5tsize);
1126 if (*minor_status)
1127 return GSS_S_FAILURE;
1129 gsshsize = sizeof(gss_cfx_wrap_token_desc);
1130 gsstsize = k5tsize;
/* Without a trailer buffer the trailer bytes are carried in the header */
1133 if (trailer != NULL) {
1134 trailer->buffer.length = gsstsize;
1135 } else {
1136 gsshsize += gsstsize;
1139 header->buffer.length = gsshsize;
1141 if (padding) {
1142 /* padding is done via EC and is contained in the header or trailer */
1143 padding->buffer.length = 0;
1146 if (conf_state) {
1147 *conf_state = conf_req_flag;
1150 return GSS_S_COMPLETE;
1156 OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
1157 const gsskrb5_ctx ctx,
1158 krb5_context context,
1159 int conf_req_flag,
1160 const gss_buffer_t input_message_buffer,
1161 int *conf_state,
1162 gss_buffer_t output_message_buffer)
1164 gss_cfx_wrap_token token;
1165 krb5_error_code ret;
1166 unsigned usage;
1167 krb5_data cipher;
1168 size_t wrapped_len, cksumsize;
1169 uint16_t padlength, rrc = 0;
1170 int32_t seq_number;
1171 u_char *p;
1173 ret = _gsskrb5cfx_wrap_length_cfx(context,
1174 ctx->crypto, conf_req_flag,
1175 IS_DCE_STYLE(ctx),
1176 input_message_buffer->length,
1177 &wrapped_len, &cksumsize, &padlength);
1178 if (ret != 0) {
1179 *minor_status = ret;
1180 return GSS_S_FAILURE;
1183 /* Always rotate encrypted token (if any) and checksum to header */
1184 rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
1186 output_message_buffer->length = wrapped_len;
1187 output_message_buffer->value = malloc(output_message_buffer->length);
1188 if (output_message_buffer->value == NULL) {
1189 *minor_status = ENOMEM;
1190 return GSS_S_FAILURE;
1193 p = output_message_buffer->value;
1194 token = (gss_cfx_wrap_token)p;
1195 token->TOK_ID[0] = 0x05;
1196 token->TOK_ID[1] = 0x04;
1197 token->Flags = 0;
1198 token->Filler = 0xFF;
1199 if ((ctx->more_flags & LOCAL) == 0)
1200 token->Flags |= CFXSentByAcceptor;
1201 if (ctx->more_flags & ACCEPTOR_SUBKEY)
1202 token->Flags |= CFXAcceptorSubkey;
1203 if (conf_req_flag) {
1205 * In Wrap tokens with confidentiality, the EC field is
1206 * used to encode the size (in bytes) of the random filler.
1208 token->Flags |= CFXSealed;
1209 token->EC[0] = (padlength >> 8) & 0xFF;
1210 token->EC[1] = (padlength >> 0) & 0xFF;
1211 } else {
1213 * In Wrap tokens without confidentiality, the EC field is
1214 * used to encode the size (in bytes) of the trailing
1215 * checksum.
1217 * This is not used in the checksum calcuation itself,
1218 * because the checksum length could potentially vary
1219 * depending on the data length.
1221 token->EC[0] = 0;
1222 token->EC[1] = 0;
1226 * In Wrap tokens that provide for confidentiality, the RRC
1227 * field in the header contains the hex value 00 00 before
1228 * encryption.
1230 * In Wrap tokens that do not provide for confidentiality,
1231 * both the EC and RRC fields in the appended checksum
1232 * contain the hex value 00 00 for the purpose of calculating
1233 * the checksum.
1235 token->RRC[0] = 0;
1236 token->RRC[1] = 0;
1238 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1239 krb5_auth_con_getlocalseqnumber(context,
1240 ctx->auth_context,
1241 &seq_number);
1242 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
1243 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1244 krb5_auth_con_setlocalseqnumber(context,
1245 ctx->auth_context,
1246 ++seq_number);
1247 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1250 * If confidentiality is requested, the token header is
1251 * appended to the plaintext before encryption; the resulting
1252 * token is {"header" | encrypt(plaintext | pad | "header")}.
1254 * If no confidentiality is requested, the checksum is
1255 * calculated over the plaintext concatenated with the
1256 * token header.
1258 if (ctx->more_flags & LOCAL) {
1259 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1260 } else {
1261 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1264 if (conf_req_flag) {
1266 * Any necessary padding is added here to ensure that the
1267 * encrypted token header is always at the end of the
1268 * ciphertext.
1270 * The specification does not require that the padding
1271 * bytes are initialized.
1273 p += sizeof(*token);
1274 memcpy(p, input_message_buffer->value, input_message_buffer->length);
1275 memset(p + input_message_buffer->length, 0xFF, padlength);
1276 memcpy(p + input_message_buffer->length + padlength,
1277 token, sizeof(*token));
1279 ret = krb5_encrypt(context, ctx->crypto,
1280 usage, p,
1281 input_message_buffer->length + padlength +
1282 sizeof(*token),
1283 &cipher);
1284 if (ret != 0) {
1285 *minor_status = ret;
1286 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1287 return GSS_S_FAILURE;
1289 assert(sizeof(*token) + cipher.length == wrapped_len);
1290 token->RRC[0] = (rrc >> 8) & 0xFF;
1291 token->RRC[1] = (rrc >> 0) & 0xFF;
1294 * this is really ugly, but needed against windows
1295 * for DCERPC, as windows rotates by EC+RRC.
1297 if (IS_DCE_STYLE(ctx)) {
1298 ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
1299 } else {
1300 ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
1302 if (ret != 0) {
1303 *minor_status = ret;
1304 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1305 return GSS_S_FAILURE;
1307 memcpy(p, cipher.data, cipher.length);
1308 krb5_data_free(&cipher);
1309 } else {
1310 char *buf;
1311 Checksum cksum;
1313 buf = malloc(input_message_buffer->length + sizeof(*token));
1314 if (buf == NULL) {
1315 *minor_status = ENOMEM;
1316 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1317 return GSS_S_FAILURE;
1319 memcpy(buf, input_message_buffer->value, input_message_buffer->length);
1320 memcpy(buf + input_message_buffer->length, token, sizeof(*token));
1322 ret = krb5_create_checksum(context, ctx->crypto,
1323 usage, 0, buf,
1324 input_message_buffer->length +
1325 sizeof(*token),
1326 &cksum);
1327 if (ret != 0) {
1328 *minor_status = ret;
1329 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1330 free(buf);
1331 return GSS_S_FAILURE;
1334 free(buf);
1336 assert(cksum.checksum.length == cksumsize);
1337 token->EC[0] = (cksum.checksum.length >> 8) & 0xFF;
1338 token->EC[1] = (cksum.checksum.length >> 0) & 0xFF;
1339 token->RRC[0] = (rrc >> 8) & 0xFF;
1340 token->RRC[1] = (rrc >> 0) & 0xFF;
1342 p += sizeof(*token);
1343 memcpy(p, input_message_buffer->value, input_message_buffer->length);
1344 memcpy(p + input_message_buffer->length,
1345 cksum.checksum.data, cksum.checksum.length);
1347 ret = rrc_rotate(p,
1348 input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
1349 if (ret != 0) {
1350 *minor_status = ret;
1351 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1352 free_Checksum(&cksum);
1353 return GSS_S_FAILURE;
1355 free_Checksum(&cksum);
1358 if (conf_state != NULL) {
1359 *conf_state = conf_req_flag;
1362 *minor_status = 0;
1363 return GSS_S_COMPLETE;
1366 OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
1367 const gsskrb5_ctx ctx,
1368 krb5_context context,
1369 const gss_buffer_t input_message_buffer,
1370 gss_buffer_t output_message_buffer,
1371 int *conf_state,
1372 gss_qop_t *qop_state)
1374 gss_cfx_wrap_token token;
1375 u_char token_flags;
1376 krb5_error_code ret;
1377 unsigned usage;
1378 krb5_data data;
1379 uint16_t ec, rrc;
1380 OM_uint32 seq_number_lo, seq_number_hi;
1381 size_t len;
1382 u_char *p;
1384 *minor_status = 0;
1386 if (input_message_buffer->length < sizeof(*token)) {
1387 return GSS_S_DEFECTIVE_TOKEN;
1390 p = input_message_buffer->value;
1392 token = (gss_cfx_wrap_token)p;
1394 if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
1395 return GSS_S_DEFECTIVE_TOKEN;
1398 /* Ignore unknown flags */
1399 token_flags = token->Flags &
1400 (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
1402 if (token_flags & CFXSentByAcceptor) {
1403 if ((ctx->more_flags & LOCAL) == 0)
1404 return GSS_S_DEFECTIVE_TOKEN;
1407 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1408 if ((token_flags & CFXAcceptorSubkey) == 0)
1409 return GSS_S_DEFECTIVE_TOKEN;
1410 } else {
1411 if (token_flags & CFXAcceptorSubkey)
1412 return GSS_S_DEFECTIVE_TOKEN;
1415 if (token->Filler != 0xFF) {
1416 return GSS_S_DEFECTIVE_TOKEN;
1419 if (conf_state != NULL) {
1420 *conf_state = (token_flags & CFXSealed) ? 1 : 0;
1423 ec = (token->EC[0] << 8) | token->EC[1];
1424 rrc = (token->RRC[0] << 8) | token->RRC[1];
1427 * Check sequence number
1429 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1430 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1431 if (seq_number_hi) {
1432 /* no support for 64-bit sequence numbers */
1433 *minor_status = ERANGE;
1434 return GSS_S_UNSEQ_TOKEN;
1437 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1438 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1439 if (ret != 0) {
1440 *minor_status = 0;
1441 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1442 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1443 return ret;
1445 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1448 * Decrypt and/or verify checksum
1451 if (ctx->more_flags & LOCAL) {
1452 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1453 } else {
1454 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1457 p += sizeof(*token);
1458 len = input_message_buffer->length;
1459 len -= (p - (u_char *)input_message_buffer->value);
1461 if (token_flags & CFXSealed) {
1463 * this is really ugly, but needed against windows
1464 * for DCERPC, as windows rotates by EC+RRC.
1466 if (IS_DCE_STYLE(ctx)) {
1467 *minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
1468 } else {
1469 *minor_status = rrc_rotate(p, len, rrc, TRUE);
1471 if (*minor_status != 0) {
1472 return GSS_S_FAILURE;
1475 ret = krb5_decrypt(context, ctx->crypto, usage,
1476 p, len, &data);
1477 if (ret != 0) {
1478 *minor_status = ret;
1479 return GSS_S_BAD_MIC;
1482 /* Check that there is room for the pad and token header */
1483 if (data.length < ec + sizeof(*token)) {
1484 krb5_data_free(&data);
1485 return GSS_S_DEFECTIVE_TOKEN;
1487 p = data.data;
1488 p += data.length - sizeof(*token);
1490 /* RRC is unprotected; don't modify input buffer */
1491 ((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
1492 ((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];
1494 /* Check the integrity of the header */
1495 if (ct_memcmp(p, token, sizeof(*token)) != 0) {
1496 krb5_data_free(&data);
1497 return GSS_S_BAD_MIC;
1500 output_message_buffer->value = data.data;
1501 output_message_buffer->length = data.length - ec - sizeof(*token);
1502 } else {
1503 Checksum cksum;
1505 /* Rotate by RRC; bogus to do this in-place XXX */
1506 *minor_status = rrc_rotate(p, len, rrc, TRUE);
1507 if (*minor_status != 0) {
1508 return GSS_S_FAILURE;
1511 /* Determine checksum type */
1512 ret = krb5_crypto_get_checksum_type(context,
1513 ctx->crypto,
1514 &cksum.cksumtype);
1515 if (ret != 0) {
1516 *minor_status = ret;
1517 return GSS_S_FAILURE;
1520 cksum.checksum.length = ec;
1522 /* Check we have at least as much data as the checksum */
1523 if (len < cksum.checksum.length) {
1524 *minor_status = ERANGE;
1525 return GSS_S_BAD_MIC;
1528 /* Length now is of the plaintext only, no checksum */
1529 len -= cksum.checksum.length;
1530 cksum.checksum.data = p + len;
1532 output_message_buffer->length = len; /* for later */
1533 output_message_buffer->value = malloc(len + sizeof(*token));
1534 if (output_message_buffer->value == NULL) {
1535 *minor_status = ENOMEM;
1536 return GSS_S_FAILURE;
1539 /* Checksum is over (plaintext-data | "header") */
1540 memcpy(output_message_buffer->value, p, len);
1541 memcpy((u_char *)output_message_buffer->value + len,
1542 token, sizeof(*token));
1544 /* EC is not included in checksum calculation */
1545 token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
1546 len);
1547 token->EC[0] = 0;
1548 token->EC[1] = 0;
1549 token->RRC[0] = 0;
1550 token->RRC[1] = 0;
1552 ret = krb5_verify_checksum(context, ctx->crypto,
1553 usage,
1554 output_message_buffer->value,
1555 len + sizeof(*token),
1556 &cksum);
1557 if (ret != 0) {
1558 *minor_status = ret;
1559 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1560 return GSS_S_BAD_MIC;
1564 if (qop_state != NULL) {
1565 *qop_state = GSS_C_QOP_DEFAULT;
1568 *minor_status = 0;
1569 return GSS_S_COMPLETE;
1572 OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
1573 const gsskrb5_ctx ctx,
1574 krb5_context context,
1575 gss_qop_t qop_req,
1576 const gss_buffer_t message_buffer,
1577 gss_buffer_t message_token)
1579 gss_cfx_mic_token token;
1580 krb5_error_code ret;
1581 unsigned usage;
1582 Checksum cksum;
1583 u_char *buf;
1584 size_t len;
1585 int32_t seq_number;
1587 len = message_buffer->length + sizeof(*token);
1588 buf = malloc(len);
1589 if (buf == NULL) {
1590 *minor_status = ENOMEM;
1591 return GSS_S_FAILURE;
1594 memcpy(buf, message_buffer->value, message_buffer->length);
1596 token = (gss_cfx_mic_token)(buf + message_buffer->length);
1597 token->TOK_ID[0] = 0x04;
1598 token->TOK_ID[1] = 0x04;
1599 token->Flags = 0;
1600 if ((ctx->more_flags & LOCAL) == 0)
1601 token->Flags |= CFXSentByAcceptor;
1602 if (ctx->more_flags & ACCEPTOR_SUBKEY)
1603 token->Flags |= CFXAcceptorSubkey;
1604 memset(token->Filler, 0xFF, 5);
1606 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1607 krb5_auth_con_getlocalseqnumber(context,
1608 ctx->auth_context,
1609 &seq_number);
1610 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
1611 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1612 krb5_auth_con_setlocalseqnumber(context,
1613 ctx->auth_context,
1614 ++seq_number);
1615 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1617 if (ctx->more_flags & LOCAL) {
1618 usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1619 } else {
1620 usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1623 ret = krb5_create_checksum(context, ctx->crypto,
1624 usage, 0, buf, len, &cksum);
1625 if (ret != 0) {
1626 *minor_status = ret;
1627 free(buf);
1628 return GSS_S_FAILURE;
1631 /* Determine MIC length */
1632 message_token->length = sizeof(*token) + cksum.checksum.length;
1633 message_token->value = malloc(message_token->length);
1634 if (message_token->value == NULL) {
1635 *minor_status = ENOMEM;
1636 free_Checksum(&cksum);
1637 free(buf);
1638 return GSS_S_FAILURE;
1641 /* Token is { "header" | get_mic("header" | plaintext-data) } */
1642 memcpy(message_token->value, token, sizeof(*token));
1643 memcpy((u_char *)message_token->value + sizeof(*token),
1644 cksum.checksum.data, cksum.checksum.length);
1646 free_Checksum(&cksum);
1647 free(buf);
1649 *minor_status = 0;
1650 return GSS_S_COMPLETE;
1653 OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
1654 const gsskrb5_ctx ctx,
1655 krb5_context context,
1656 const gss_buffer_t message_buffer,
1657 const gss_buffer_t token_buffer,
1658 gss_qop_t *qop_state)
1660 gss_cfx_mic_token token;
1661 u_char token_flags;
1662 krb5_error_code ret;
1663 unsigned usage;
1664 OM_uint32 seq_number_lo, seq_number_hi;
1665 u_char *buf, *p;
1666 Checksum cksum;
1668 *minor_status = 0;
1670 if (token_buffer->length < sizeof(*token)) {
1671 return GSS_S_DEFECTIVE_TOKEN;
1674 p = token_buffer->value;
1676 token = (gss_cfx_mic_token)p;
1678 if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
1679 return GSS_S_DEFECTIVE_TOKEN;
1682 /* Ignore unknown flags */
1683 token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);
1685 if (token_flags & CFXSentByAcceptor) {
1686 if ((ctx->more_flags & LOCAL) == 0)
1687 return GSS_S_DEFECTIVE_TOKEN;
1689 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1690 if ((token_flags & CFXAcceptorSubkey) == 0)
1691 return GSS_S_DEFECTIVE_TOKEN;
1692 } else {
1693 if (token_flags & CFXAcceptorSubkey)
1694 return GSS_S_DEFECTIVE_TOKEN;
1697 if (ct_memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
1698 return GSS_S_DEFECTIVE_TOKEN;
1702 * Check sequence number
1704 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1705 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1706 if (seq_number_hi) {
1707 *minor_status = ERANGE;
1708 return GSS_S_UNSEQ_TOKEN;
1711 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1712 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1713 if (ret != 0) {
1714 *minor_status = 0;
1715 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1716 return ret;
1718 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1721 * Verify checksum
1723 ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
1724 &cksum.cksumtype);
1725 if (ret != 0) {
1726 *minor_status = ret;
1727 return GSS_S_FAILURE;
1730 cksum.checksum.data = p + sizeof(*token);
1731 cksum.checksum.length = token_buffer->length - sizeof(*token);
1733 if (ctx->more_flags & LOCAL) {
1734 usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1735 } else {
1736 usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1739 buf = malloc(message_buffer->length + sizeof(*token));
1740 if (buf == NULL) {
1741 *minor_status = ENOMEM;
1742 return GSS_S_FAILURE;
1744 memcpy(buf, message_buffer->value, message_buffer->length);
1745 memcpy(buf + message_buffer->length, token, sizeof(*token));
1747 ret = krb5_verify_checksum(context, ctx->crypto,
1748 usage,
1749 buf,
1750 sizeof(*token) + message_buffer->length,
1751 &cksum);
1752 if (ret != 0) {
1753 *minor_status = ret;
1754 free(buf);
1755 return GSS_S_BAD_MIC;
1758 free(buf);
1760 if (qop_state != NULL) {
1761 *qop_state = GSS_C_QOP_DEFAULT;
1764 return GSS_S_COMPLETE;