2 * Copyright (c) 2003, PADL Software Pty Ltd.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of PADL Software nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include "gsskrb5_locl.h"
36 * Implementation of RFC 4121
39 #define CFXSentByAcceptor (1 << 0)
40 #define CFXSealed (1 << 1)
41 #define CFXAcceptorSubkey (1 << 2)
/*
 * _gsskrb5cfx_wrap_length_cfx -- size calculation for RFC 4121 (CFX)
 * Wrap tokens.
 *
 * Visible outputs: *output_length (total wrapped-token size), *cksumsize
 * (checksum size for the crypto context's checksum type) and *padlength
 * (pad bytes needed on the plaintext).
 *
 * NOTE(review): this extraction is line-mangled and several upstream
 * lines (remaining parameter declarations, error checks after each
 * krb5 call, the conf/non-conf branch structure, braces) are elided;
 * the comments below annotate only the visible fragments.
 */
44 _gsskrb5cfx_wrap_length_cfx(krb5_context context
,
49 size_t *output_length
,
56 /* 16-byte header is always first */
57 *output_length
= sizeof(gss_cfx_wrap_token_desc
);
/* Ask libkrb5 which checksum type this crypto context uses ... */
60 ret
= krb5_crypto_get_checksum_type(context
, crypto
, &type
);
/* ... and how many bytes that checksum occupies (written to *cksumsize). */
64 ret
= krb5_checksumsize(context
, type
, cksumsize
);
71 /* Header is concatenated with data before encryption */
72 input_length
+= sizeof(gss_cfx_wrap_token_desc
);
/*
 * Both fragments below write into `padsize'; upstream selects between
 * block size and pad size via conditionals elided here -- TODO confirm
 * against upstream cfx.c.
 */
75 ret
= krb5_crypto_getblocksize(context
, crypto
, &padsize
);
77 ret
= krb5_crypto_getpadsize(context
, crypto
, &padsize
);
/* Pad the concatenated length up to a multiple of padsize (the
 * padsize > 1 guard, if any, is elided from this view). */
84 *padlength
= padsize
- (input_length
% padsize
);
86 /* We add the pad ourselves (noted here for completeness only) */
87 input_length
+= *padlength
;
/* Ciphertext contribution as computed by libkrb5. */
90 *output_length
+= krb5_get_wrapped_length(context
,
91 crypto
, input_length
);
93 /* Checksum is concatenated with data */
94 *output_length
+= input_length
+ *cksumsize
;
/* Sanity check: wrapping must always expand the message. */
97 assert(*output_length
> input_length
);
/*
 * _gssapi_wrap_size_cfx -- given a maximum wrapped-token size
 * (req_output_size), compute the largest plaintext (*max_input_size)
 * that will fit once wrapped with a CFX token.
 *
 * NOTE(review): line-mangled extraction; the conf/non-conf branch
 * structure, loop decrement and error checks are elided from view.
 */
103 _gssapi_wrap_size_cfx(OM_uint32
*minor_status
,
104 const gsskrb5_ctx ctx
,
105 krb5_context context
,
108 OM_uint32 req_output_size
,
109 OM_uint32
*max_input_size
)
/* The fixed 16-byte CFX token header is budgeted for up front. */
115 /* 16-byte header is always first */
116 if (req_output_size
< 16)
118 req_output_size
-= 16;
121 size_t wrapped_size
, sz
;
/*
 * Search downward for the largest plaintext whose wrapped length fits
 * in the budget; starts one above the budget (the decrement inside the
 * do/while is elided from this view -- TODO confirm).
 */
123 wrapped_size
= req_output_size
+ 1;
126 sz
= krb5_get_wrapped_length(context
,
127 ctx
->crypto
, wrapped_size
);
128 } while (wrapped_size
&& sz
> req_output_size
);
129 if (wrapped_size
== 0)
133 if (wrapped_size
< 16)
138 *max_input_size
= wrapped_size
;
/* Non-confidential path: only the checksum is appended, so subtract
 * the checksum size for the context's checksum type. */
143 ret
= krb5_crypto_get_checksum_type(context
, ctx
->crypto
, &type
);
147 ret
= krb5_checksumsize(context
, type
, &cksumsize
);
151 if (req_output_size
< cksumsize
)
154 /* Checksum is concatenated with data */
155 *max_input_size
= req_output_size
- cksumsize
;
162 * Rotate "rrc" bytes to the front or back
/*
 * rrc_rotate -- rotate the trailing/leading `rrc' bytes of `data'
 * (length `len') to the other end, in place.  `unrotate' selects the
 * direction so that rrc_rotate(.., TRUE) undoes rrc_rotate(.., FALSE).
 *
 * NOTE(review): the early-outs, the computation of `left' (presumably
 * len - rrc -- TODO confirm) and the heap-allocation fallback for
 * large rrc are elided from this extraction.
 */
165 static krb5_error_code
166 rrc_rotate(void *data
, size_t len
, uint16_t rrc
, krb5_boolean unrotate
)
/* Scratch space: small rotations use the 256-byte stack buffer. */
168 u_char
*tmp
, buf
[256];
181 if (rrc
<= sizeof(buf
)) {
/* Unrotate: move the first rrc bytes to the back. */
190 memcpy(tmp
, data
, rrc
);
191 memmove(data
, (u_char
*)data
+ rrc
, left
);
192 memcpy((u_char
*)data
+ left
, tmp
, rrc
);
/* Rotate: move the last rrc bytes to the front. */
194 memcpy(tmp
, (u_char
*)data
+ left
, rrc
);
195 memmove((u_char
*)data
+ rrc
, data
, left
);
196 memcpy(data
, tmp
, rrc
);
/* Presumably frees a heap-allocated tmp when the stack buffer was too
 * small (the free itself is elided from view). */
199 if (rrc
> sizeof(buf
))
/*
 * _gk_find_buffer -- locate the single IOV buffer of the given type in
 * the iov array, or return GSS_C_NO_IOV_BUFFER if absent or duplicated.
 *
 * NOTE(review): the assignment into `iovp' inside the loop and the
 * final return are elided from this extraction.
 */
205 gss_iov_buffer_desc
*
206 _gk_find_buffer(gss_iov_buffer_desc
*iov
, int iov_count
, OM_uint32 type
)
209 gss_iov_buffer_t iovp
= GSS_C_NO_IOV_BUFFER
;
/* A NULL iov array trivially contains no buffer of any type. */
211 if (iov
== GSS_C_NO_IOV_BUFFER
)
212 return GSS_C_NO_IOV_BUFFER
;
215 * This function is used to find header, padding or trailer buffers
216 * which are singletons; return NULL if multiple instances are found.
218 for (i
= 0; i
< iov_count
; i
++) {
219 if (type
== GSS_IOV_BUFFER_TYPE(iov
[i
].type
)) {
/* Second match of the same type: not a singleton, bail out. */
220 if (iovp
== GSS_C_NO_IOV_BUFFER
)
223 return GSS_C_NO_IOV_BUFFER
;
228 * For compatibility with SSPI, an empty padding buffer is treated
229 * equivalent to an absent padding buffer (unless the caller is
230 * requesting that a padding buffer be allocated).
/* Condition for the SSPI special case above: zero-length padding
 * buffer without the ALLOCATE flag (the enclosing if/return is
 * elided from this view). */
233 iovp
->buffer
.length
== 0 &&
234 type
== GSS_IOV_BUFFER_TYPE_PADDING
&&
235 (GSS_IOV_BUFFER_FLAGS(iovp
->type
) & GSS_IOV_BUFFER_FLAG_ALLOCATE
) == 0)
242 _gk_allocate_buffer(OM_uint32
*minor_status
, gss_iov_buffer_desc
*buffer
, size_t size
)
244 if (buffer
->type
& GSS_IOV_BUFFER_FLAG_ALLOCATED
) {
245 if (buffer
->buffer
.length
== size
)
246 return GSS_S_COMPLETE
;
247 free(buffer
->buffer
.value
);
250 buffer
->buffer
.value
= malloc(size
);
251 buffer
->buffer
.length
= size
;
252 if (buffer
->buffer
.value
== NULL
) {
253 *minor_status
= ENOMEM
;
254 return GSS_S_FAILURE
;
256 buffer
->type
|= GSS_IOV_BUFFER_FLAG_ALLOCATED
;
258 return GSS_S_COMPLETE
;
/*
 * _gk_verify_buffers -- validate the combination of header, padding and
 * trailer IOV buffers for this context.
 *
 * A header buffer is mandatory.  In DCE style mode padding and trailer
 * buffers are rejected; otherwise block-cipher enctypes require a
 * padding buffer.  Returns GSS_S_COMPLETE on success, GSS_S_FAILURE
 * with *minor_status = EINVAL otherwise.
 *
 * NOTE(review): the trailing `block_cipher' parameter declaration and
 * the return type line are elided from this extraction (the body
 * clearly references `block_cipher') -- confirm against upstream.
 */
263 _gk_verify_buffers(OM_uint32
*minor_status
,
264 const gsskrb5_ctx ctx
,
265 const gss_iov_buffer_desc
*header
,
266 const gss_iov_buffer_desc
*padding
,
267 const gss_iov_buffer_desc
*trailer
,
/* The header buffer is required for every token layout. */
270 if (header
== NULL
) {
271 *minor_status
= EINVAL
;
272 return GSS_S_FAILURE
;
275 if (IS_DCE_STYLE(ctx
)) {
277 * In DCE style mode we reject having a padding or trailer buffer
279 if (padding
|| trailer
) {
280 *minor_status
= EINVAL
;
281 return GSS_S_FAILURE
;
285 * In non-DCE style mode we require having a padding buffer for
286 * encryption types that do not behave as stream ciphers. This
287 * check is superfluous for now, as only RC4 and RFC4121 enctypes
288 * are presently implemented for the IOV APIs; be defensive.
290 if (block_cipher
&& padding
== NULL
) {
291 *minor_status
= EINVAL
;
292 return GSS_S_FAILURE
;
297 return GSS_S_COMPLETE
;
301 _gssapi_wrap_cfx_iov(OM_uint32
*minor_status
,
303 krb5_context context
,
306 gss_iov_buffer_desc
*iov
,
309 OM_uint32 major_status
, junk
;
310 gss_iov_buffer_desc
*header
, *trailer
, *padding
;
311 size_t gsshsize
, k5hsize
;
312 size_t gsstsize
, k5tsize
;
313 size_t rrc
= 0, ec
= 0;
315 gss_cfx_wrap_token token
;
319 krb5_crypto_iov
*data
= NULL
;
321 header
= _gk_find_buffer(iov
, iov_count
, GSS_IOV_BUFFER_TYPE_HEADER
);
322 if (header
== NULL
) {
323 *minor_status
= EINVAL
;
324 return GSS_S_FAILURE
;
327 padding
= _gk_find_buffer(iov
, iov_count
, GSS_IOV_BUFFER_TYPE_PADDING
);
328 if (padding
!= NULL
) {
329 padding
->buffer
.length
= 0;
332 trailer
= _gk_find_buffer(iov
, iov_count
, GSS_IOV_BUFFER_TYPE_TRAILER
);
334 major_status
= _gk_verify_buffers(minor_status
, ctx
, header
,
335 padding
, trailer
, FALSE
);
336 if (major_status
!= GSS_S_COMPLETE
) {
346 for (i
= 0; i
< iov_count
; i
++) {
347 switch (GSS_IOV_BUFFER_TYPE(iov
[i
].type
)) {
348 case GSS_IOV_BUFFER_TYPE_DATA
:
349 size
+= iov
[i
].buffer
.length
;
356 size
+= sizeof(gss_cfx_wrap_token_desc
);
358 *minor_status
= krb5_crypto_length(context
, ctx
->crypto
,
359 KRB5_CRYPTO_TYPE_HEADER
,
362 return GSS_S_FAILURE
;
364 *minor_status
= krb5_crypto_length(context
, ctx
->crypto
,
365 KRB5_CRYPTO_TYPE_TRAILER
,
368 return GSS_S_FAILURE
;
370 *minor_status
= krb5_crypto_length(context
, ctx
->crypto
,
371 KRB5_CRYPTO_TYPE_PADDING
,
374 return GSS_S_FAILURE
;
377 k5psize
= k5pbase
- (size
% k5pbase
);
382 if (k5psize
== 0 && IS_DCE_STYLE(ctx
)) {
383 *minor_status
= krb5_crypto_getblocksize(context
, ctx
->crypto
,
386 return GSS_S_FAILURE
;
392 gsshsize
= sizeof(gss_cfx_wrap_token_desc
) + k5hsize
;
393 gsstsize
= sizeof(gss_cfx_wrap_token_desc
) + ec
+ k5tsize
;
395 if (IS_DCE_STYLE(ctx
)) {
396 *minor_status
= EINVAL
;
397 return GSS_S_FAILURE
;
401 *minor_status
= krb5_crypto_length(context
, ctx
->crypto
,
402 KRB5_CRYPTO_TYPE_CHECKSUM
,
405 return GSS_S_FAILURE
;
407 gsshsize
= sizeof(gss_cfx_wrap_token_desc
);
415 if (trailer
== NULL
) {
417 if (IS_DCE_STYLE(ctx
))
419 gsshsize
+= gsstsize
;
420 } else if (GSS_IOV_BUFFER_FLAGS(trailer
->type
) & GSS_IOV_BUFFER_FLAG_ALLOCATE
) {
421 major_status
= _gk_allocate_buffer(minor_status
, trailer
, gsstsize
);
424 } else if (trailer
->buffer
.length
< gsstsize
) {
425 *minor_status
= KRB5_BAD_MSIZE
;
426 major_status
= GSS_S_FAILURE
;
429 trailer
->buffer
.length
= gsstsize
;
435 if (GSS_IOV_BUFFER_FLAGS(header
->type
) & GSS_IOV_BUFFER_FLAG_ALLOCATE
) {
436 major_status
= _gk_allocate_buffer(minor_status
, header
, gsshsize
);
437 if (major_status
!= GSS_S_COMPLETE
)
439 } else if (header
->buffer
.length
< gsshsize
) {
440 *minor_status
= KRB5_BAD_MSIZE
;
441 major_status
= GSS_S_FAILURE
;
444 header
->buffer
.length
= gsshsize
;
446 token
= (gss_cfx_wrap_token
)header
->buffer
.value
;
448 token
->TOK_ID
[0] = 0x05;
449 token
->TOK_ID
[1] = 0x04;
451 token
->Filler
= 0xFF;
453 if ((ctx
->more_flags
& LOCAL
) == 0)
454 token
->Flags
|= CFXSentByAcceptor
;
456 if (ctx
->more_flags
& ACCEPTOR_SUBKEY
)
457 token
->Flags
|= CFXAcceptorSubkey
;
459 if (ctx
->more_flags
& LOCAL
)
460 usage
= KRB5_KU_USAGE_INITIATOR_SEAL
;
462 usage
= KRB5_KU_USAGE_ACCEPTOR_SEAL
;
466 * In Wrap tokens with confidentiality, the EC field is
467 * used to encode the size (in bytes) of the random filler.
469 token
->Flags
|= CFXSealed
;
470 token
->EC
[0] = (ec
>> 8) & 0xFF;
471 token
->EC
[1] = (ec
>> 0) & 0xFF;
475 * In Wrap tokens without confidentiality, the EC field is
476 * used to encode the size (in bytes) of the trailing
479 * This is not used in the checksum calcuation itself,
480 * because the checksum length could potentially vary
481 * depending on the data length.
488 * In Wrap tokens that provide for confidentiality, the RRC
489 * field in the header contains the hex value 00 00 before
492 * In Wrap tokens that do not provide for confidentiality,
493 * both the EC and RRC fields in the appended checksum
494 * contain the hex value 00 00 for the purpose of calculating
500 HEIMDAL_MUTEX_lock(&ctx
->ctx_id_mutex
);
501 krb5_auth_con_getlocalseqnumber(context
,
504 _gss_mg_encode_be_uint32(0, &token
->SND_SEQ
[0]);
505 _gss_mg_encode_be_uint32(seq_number
, &token
->SND_SEQ
[4]);
506 krb5_auth_con_setlocalseqnumber(context
,
509 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
511 data
= calloc(iov_count
+ 3, sizeof(data
[0]));
513 *minor_status
= ENOMEM
;
514 major_status
= GSS_S_FAILURE
;
522 {"header" | encrypt(plaintext-data | ec-padding | E"header")}
524 Expanded, this is with with RRC = 0:
526 {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }
528 In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)
530 {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data }
534 data
[i
].flags
= KRB5_CRYPTO_TYPE_HEADER
;
535 data
[i
].data
.data
= ((uint8_t *)header
->buffer
.value
) + header
->buffer
.length
- k5hsize
;
536 data
[i
].data
.length
= k5hsize
;
538 for (i
= 1; i
< iov_count
+ 1; i
++) {
539 switch (GSS_IOV_BUFFER_TYPE(iov
[i
- 1].type
)) {
540 case GSS_IOV_BUFFER_TYPE_DATA
:
541 data
[i
].flags
= KRB5_CRYPTO_TYPE_DATA
;
543 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY
:
544 data
[i
].flags
= KRB5_CRYPTO_TYPE_SIGN_ONLY
;
547 data
[i
].flags
= KRB5_CRYPTO_TYPE_EMPTY
;
550 data
[i
].data
.length
= iov
[i
- 1].buffer
.length
;
551 data
[i
].data
.data
= iov
[i
- 1].buffer
.value
;
555 * Any necessary padding is added here to ensure that the
556 * encrypted token header is always at the end of the
560 /* encrypted CFX header in trailer (or after the header if in
561 DCE mode). Copy in header into E"header"
563 data
[i
].flags
= KRB5_CRYPTO_TYPE_DATA
;
565 data
[i
].data
.data
= trailer
->buffer
.value
;
567 data
[i
].data
.data
= ((uint8_t *)header
->buffer
.value
) + sizeof(*token
);
569 data
[i
].data
.length
= ec
+ sizeof(*token
);
570 memset(data
[i
].data
.data
, 0xFF, ec
);
571 memcpy(((uint8_t *)data
[i
].data
.data
) + ec
, token
, sizeof(*token
));
574 /* Kerberos trailer comes after the gss trailer */
575 data
[i
].flags
= KRB5_CRYPTO_TYPE_TRAILER
;
576 data
[i
].data
.data
= ((uint8_t *)data
[i
-1].data
.data
) + ec
+ sizeof(*token
);
577 data
[i
].data
.length
= k5tsize
;
580 ret
= krb5_encrypt_iov_ivec(context
, ctx
->crypto
, usage
, data
, i
, NULL
);
583 major_status
= GSS_S_FAILURE
;
588 token
->RRC
[0] = (rrc
>> 8) & 0xFF;
589 token
->RRC
[1] = (rrc
>> 0) & 0xFF;
596 {data | "header" | gss-trailer (krb5 checksum)
602 for (i
= 0; i
< iov_count
; i
++) {
603 switch (GSS_IOV_BUFFER_TYPE(iov
[i
].type
)) {
604 case GSS_IOV_BUFFER_TYPE_DATA
:
605 data
[i
].flags
= KRB5_CRYPTO_TYPE_DATA
;
607 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY
:
608 data
[i
].flags
= KRB5_CRYPTO_TYPE_SIGN_ONLY
;
611 data
[i
].flags
= KRB5_CRYPTO_TYPE_EMPTY
;
614 data
[i
].data
.length
= iov
[i
].buffer
.length
;
615 data
[i
].data
.data
= iov
[i
].buffer
.value
;
618 data
[i
].flags
= KRB5_CRYPTO_TYPE_DATA
;
619 data
[i
].data
.data
= header
->buffer
.value
;
620 data
[i
].data
.length
= sizeof(gss_cfx_wrap_token_desc
);
623 data
[i
].flags
= KRB5_CRYPTO_TYPE_CHECKSUM
;
625 data
[i
].data
.data
= trailer
->buffer
.value
;
627 data
[i
].data
.data
= (uint8_t *)header
->buffer
.value
+
628 sizeof(gss_cfx_wrap_token_desc
);
630 data
[i
].data
.length
= k5tsize
;
633 ret
= krb5_create_checksum_iov(context
, ctx
->crypto
, usage
, data
, i
, NULL
);
636 major_status
= GSS_S_FAILURE
;
641 token
->RRC
[0] = (rrc
>> 8) & 0xFF;
642 token
->RRC
[1] = (rrc
>> 0) & 0xFF;
645 token
->EC
[0] = (k5tsize
>> 8) & 0xFF;
646 token
->EC
[1] = (k5tsize
>> 0) & 0xFF;
649 if (conf_state
!= NULL
)
650 *conf_state
= conf_req_flag
;
655 return GSS_S_COMPLETE
;
661 gss_release_iov_buffer(&junk
, iov
, iov_count
);
666 /* This is slowpath */
668 unrotate_iov(OM_uint32
*minor_status
, size_t rrc
, gss_iov_buffer_desc
*iov
, int iov_count
)
671 size_t len
= 0, skip
;
674 for (i
= 0; i
< iov_count
; i
++)
675 if (GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_DATA
||
676 GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_PADDING
||
677 GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_TRAILER
)
678 len
+= iov
[i
].buffer
.length
;
682 *minor_status
= ENOMEM
;
683 return GSS_S_FAILURE
;
689 for (i
= 0; i
< iov_count
; i
++) {
690 if (GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_DATA
||
691 GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_PADDING
||
692 GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_TRAILER
)
694 memcpy(q
, iov
[i
].buffer
.value
, iov
[i
].buffer
.length
);
695 q
+= iov
[i
].buffer
.length
;
698 assert((size_t)(q
- p
) == len
);
700 /* unrotate first part */
703 for (i
= 0; i
< iov_count
; i
++) {
704 if (GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_DATA
||
705 GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_PADDING
||
706 GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_TRAILER
)
708 if (iov
[i
].buffer
.length
<= skip
) {
709 skip
-= iov
[i
].buffer
.length
;
711 /* copy back to original buffer */
712 memcpy(((uint8_t *)iov
[i
].buffer
.value
) + skip
, q
, iov
[i
].buffer
.length
- skip
);
713 q
+= iov
[i
].buffer
.length
- skip
;
721 for (i
= 0; i
< iov_count
; i
++) {
722 if (GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_DATA
||
723 GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_PADDING
||
724 GSS_IOV_BUFFER_TYPE(iov
[i
].type
) == GSS_IOV_BUFFER_TYPE_TRAILER
)
726 memcpy(iov
[i
].buffer
.value
, q
, min(iov
[i
].buffer
.length
, skip
));
727 if (iov
[i
].buffer
.length
> skip
)
729 skip
-= iov
[i
].buffer
.length
;
730 q
+= iov
[i
].buffer
.length
;
734 return GSS_S_COMPLETE
;
739 _gssapi_unwrap_cfx_iov(OM_uint32
*minor_status
,
741 krb5_context context
,
743 gss_qop_t
*qop_state
,
744 gss_iov_buffer_desc
*iov
,
747 OM_uint32 seq_number_lo
, seq_number_hi
, major_status
, junk
;
748 gss_iov_buffer_desc
*header
, *trailer
, *padding
;
749 gss_cfx_wrap_token token
, ttoken
;
754 krb5_crypto_iov
*data
= NULL
;
759 header
= _gk_find_buffer(iov
, iov_count
, GSS_IOV_BUFFER_TYPE_HEADER
);
760 if (header
== NULL
) {
761 *minor_status
= EINVAL
;
762 return GSS_S_FAILURE
;
765 if (header
->buffer
.length
< sizeof(*token
)) /* we check exact below */
766 return GSS_S_DEFECTIVE_TOKEN
;
768 padding
= _gk_find_buffer(iov
, iov_count
, GSS_IOV_BUFFER_TYPE_PADDING
);
769 if (padding
!= NULL
&& padding
->buffer
.length
!= 0) {
770 *minor_status
= EINVAL
;
771 return GSS_S_FAILURE
;
774 trailer
= _gk_find_buffer(iov
, iov_count
, GSS_IOV_BUFFER_TYPE_TRAILER
);
776 major_status
= _gk_verify_buffers(minor_status
, ctx
, header
,
777 padding
, trailer
, FALSE
);
778 if (major_status
!= GSS_S_COMPLETE
) {
782 token
= (gss_cfx_wrap_token
)header
->buffer
.value
;
784 if (token
->TOK_ID
[0] != 0x05 || token
->TOK_ID
[1] != 0x04)
785 return GSS_S_DEFECTIVE_TOKEN
;
787 /* Ignore unknown flags */
788 token_flags
= token
->Flags
&
789 (CFXSentByAcceptor
| CFXSealed
| CFXAcceptorSubkey
);
791 if (token_flags
& CFXSentByAcceptor
) {
792 if ((ctx
->more_flags
& LOCAL
) == 0)
793 return GSS_S_DEFECTIVE_TOKEN
;
796 if (ctx
->more_flags
& ACCEPTOR_SUBKEY
) {
797 if ((token_flags
& CFXAcceptorSubkey
) == 0)
798 return GSS_S_DEFECTIVE_TOKEN
;
800 if (token_flags
& CFXAcceptorSubkey
)
801 return GSS_S_DEFECTIVE_TOKEN
;
804 if (token
->Filler
!= 0xFF)
805 return GSS_S_DEFECTIVE_TOKEN
;
807 if (conf_state
!= NULL
)
808 *conf_state
= (token_flags
& CFXSealed
) ? 1 : 0;
810 ec
= (token
->EC
[0] << 8) | token
->EC
[1];
811 rrc
= (token
->RRC
[0] << 8) | token
->RRC
[1];
814 * Check sequence number
816 _gss_mg_decode_be_uint32(&token
->SND_SEQ
[0], &seq_number_hi
);
817 _gss_mg_decode_be_uint32(&token
->SND_SEQ
[4], &seq_number_lo
);
819 /* no support for 64-bit sequence numbers */
820 *minor_status
= ERANGE
;
821 return GSS_S_UNSEQ_TOKEN
;
824 HEIMDAL_MUTEX_lock(&ctx
->ctx_id_mutex
);
825 ret
= _gssapi_msg_order_check(ctx
->order
, seq_number_lo
);
828 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
831 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
834 * Decrypt and/or verify checksum
837 if (ctx
->more_flags
& LOCAL
) {
838 usage
= KRB5_KU_USAGE_ACCEPTOR_SEAL
;
840 usage
= KRB5_KU_USAGE_INITIATOR_SEAL
;
843 data
= calloc(iov_count
+ 3, sizeof(data
[0]));
845 *minor_status
= ENOMEM
;
846 major_status
= GSS_S_FAILURE
;
850 if (token_flags
& CFXSealed
) {
851 size_t k5tsize
, k5hsize
;
853 krb5_crypto_length(context
, ctx
->crypto
, KRB5_CRYPTO_TYPE_HEADER
, &k5hsize
);
854 krb5_crypto_length(context
, ctx
->crypto
, KRB5_CRYPTO_TYPE_TRAILER
, &k5tsize
);
856 /* Rotate by RRC; bogus to do this in-place XXX */
859 if (trailer
== NULL
) {
860 size_t gsstsize
= k5tsize
+ sizeof(*token
);
861 size_t gsshsize
= k5hsize
+ sizeof(*token
);
863 if (rrc
!= gsstsize
) {
864 major_status
= GSS_S_DEFECTIVE_TOKEN
;
868 if (IS_DCE_STYLE(ctx
))
871 gsshsize
+= gsstsize
;
873 if (header
->buffer
.length
!= gsshsize
) {
874 major_status
= GSS_S_DEFECTIVE_TOKEN
;
877 } else if (trailer
->buffer
.length
!= sizeof(*token
) + k5tsize
) {
878 major_status
= GSS_S_DEFECTIVE_TOKEN
;
880 } else if (header
->buffer
.length
!= sizeof(*token
) + k5hsize
) {
881 major_status
= GSS_S_DEFECTIVE_TOKEN
;
883 } else if (rrc
!= 0) {
884 /* go though slowpath */
885 major_status
= unrotate_iov(minor_status
, rrc
, iov
, iov_count
);
891 data
[i
].flags
= KRB5_CRYPTO_TYPE_HEADER
;
892 data
[i
].data
.data
= ((uint8_t *)header
->buffer
.value
) + header
->buffer
.length
- k5hsize
;
893 data
[i
].data
.length
= k5hsize
;
896 for (j
= 0; j
< iov_count
; i
++, j
++) {
897 switch (GSS_IOV_BUFFER_TYPE(iov
[j
].type
)) {
898 case GSS_IOV_BUFFER_TYPE_DATA
:
899 data
[i
].flags
= KRB5_CRYPTO_TYPE_DATA
;
901 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY
:
902 data
[i
].flags
= KRB5_CRYPTO_TYPE_SIGN_ONLY
;
905 data
[i
].flags
= KRB5_CRYPTO_TYPE_EMPTY
;
908 data
[i
].data
.length
= iov
[j
].buffer
.length
;
909 data
[i
].data
.data
= iov
[j
].buffer
.value
;
912 /* encrypted CFX header in trailer (or after the header if in
913 DCE mode). Copy in header into E"header"
915 data
[i
].flags
= KRB5_CRYPTO_TYPE_DATA
;
917 data
[i
].data
.data
= trailer
->buffer
.value
;
919 data
[i
].data
.data
= ((uint8_t *)header
->buffer
.value
) +
920 header
->buffer
.length
- k5hsize
- k5tsize
- ec
- sizeof(*token
);
923 data
[i
].data
.length
= ec
+ sizeof(*token
);
924 ttoken
= (gss_cfx_wrap_token
)(((uint8_t *)data
[i
].data
.data
) + ec
);
927 /* Kerberos trailer comes after the gss trailer */
928 data
[i
].flags
= KRB5_CRYPTO_TYPE_TRAILER
;
929 data
[i
].data
.data
= ((uint8_t *)data
[i
-1].data
.data
) + ec
+ sizeof(*token
);
930 data
[i
].data
.length
= k5tsize
;
933 ret
= krb5_decrypt_iov_ivec(context
, ctx
->crypto
, usage
, data
, i
, NULL
);
936 major_status
= GSS_S_FAILURE
;
940 ttoken
->RRC
[0] = token
->RRC
[0];
941 ttoken
->RRC
[1] = token
->RRC
[1];
943 /* Check the integrity of the header */
944 if (ct_memcmp(ttoken
, token
, sizeof(*token
)) != 0) {
945 major_status
= GSS_S_BAD_MIC
;
949 size_t gsstsize
= ec
;
950 size_t gsshsize
= sizeof(*token
);
952 if (trailer
== NULL
) {
954 if (rrc
!= gsstsize
) {
955 *minor_status
= EINVAL
;
956 major_status
= GSS_S_FAILURE
;
960 gsshsize
+= gsstsize
;
961 } else if (trailer
->buffer
.length
!= gsstsize
) {
962 major_status
= GSS_S_DEFECTIVE_TOKEN
;
964 } else if (rrc
!= 0) {
966 *minor_status
= EINVAL
;
967 major_status
= GSS_S_FAILURE
;
971 if (header
->buffer
.length
!= gsshsize
) {
972 major_status
= GSS_S_DEFECTIVE_TOKEN
;
976 for (i
= 0; i
< iov_count
; i
++) {
977 switch (GSS_IOV_BUFFER_TYPE(iov
[i
].type
)) {
978 case GSS_IOV_BUFFER_TYPE_DATA
:
979 data
[i
].flags
= KRB5_CRYPTO_TYPE_DATA
;
981 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY
:
982 data
[i
].flags
= KRB5_CRYPTO_TYPE_SIGN_ONLY
;
985 data
[i
].flags
= KRB5_CRYPTO_TYPE_EMPTY
;
988 data
[i
].data
.length
= iov
[i
].buffer
.length
;
989 data
[i
].data
.data
= iov
[i
].buffer
.value
;
992 data
[i
].flags
= KRB5_CRYPTO_TYPE_DATA
;
993 data
[i
].data
.data
= header
->buffer
.value
;
994 data
[i
].data
.length
= sizeof(*token
);
997 data
[i
].flags
= KRB5_CRYPTO_TYPE_CHECKSUM
;
999 data
[i
].data
.data
= trailer
->buffer
.value
;
1001 data
[i
].data
.data
= (uint8_t *)header
->buffer
.value
+
1004 data
[i
].data
.length
= ec
;
1007 token
= (gss_cfx_wrap_token
)header
->buffer
.value
;
1013 ret
= krb5_verify_checksum_iov(context
, ctx
->crypto
, usage
, data
, i
, NULL
);
1015 *minor_status
= ret
;
1016 major_status
= GSS_S_FAILURE
;
1021 if (qop_state
!= NULL
) {
1022 *qop_state
= GSS_C_QOP_DEFAULT
;
1028 return GSS_S_COMPLETE
;
1034 gss_release_iov_buffer(&junk
, iov
, iov_count
);
1036 return major_status
;
/*
 * _gssapi_wrap_iov_length_cfx -- compute the header/padding/trailer
 * buffer lengths the caller must supply for a CFX wrap_iov call,
 * without performing any cryptography.
 *
 * NOTE(review): line-mangled extraction; many upstream lines (remaining
 * parameters, error checks after krb5_crypto_length calls, braces,
 * break statements) are elided.  Comments annotate visible fragments.
 */
1040 _gssapi_wrap_iov_length_cfx(OM_uint32
*minor_status
,
1042 krb5_context context
,
1046 gss_iov_buffer_desc
*iov
,
1049 OM_uint32 major_status
;
1052 gss_iov_buffer_desc
*header
= NULL
;
1053 gss_iov_buffer_desc
*padding
= NULL
;
1054 gss_iov_buffer_desc
*trailer
= NULL
;
1055 size_t gsshsize
= 0;
1056 size_t gsstsize
= 0;
1060 GSSAPI_KRB5_INIT (&context
);
/*
 * Scan the IOV list: sum DATA buffer lengths into `size' and record
 * the (singleton) header/trailer/padding buffers; a duplicate of any
 * of those is an error.
 */
1063 for (size
= 0, i
= 0; i
< iov_count
; i
++) {
1064 switch(GSS_IOV_BUFFER_TYPE(iov
[i
].type
)) {
1065 case GSS_IOV_BUFFER_TYPE_EMPTY
:
1067 case GSS_IOV_BUFFER_TYPE_DATA
:
1068 size
+= iov
[i
].buffer
.length
;
1070 case GSS_IOV_BUFFER_TYPE_HEADER
:
1071 if (header
!= NULL
) {
1073 return GSS_S_FAILURE
;
1077 case GSS_IOV_BUFFER_TYPE_TRAILER
:
1078 if (trailer
!= NULL
) {
1080 return GSS_S_FAILURE
;
1084 case GSS_IOV_BUFFER_TYPE_PADDING
:
1085 if (padding
!= NULL
) {
1087 return GSS_S_FAILURE
;
1091 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY
:
/* Unknown buffer type (default case; label elided from view). */
1094 *minor_status
= EINVAL
;
1095 return GSS_S_FAILURE
;
/* Validate the header/padding/trailer combination for this context. */
1099 major_status
= _gk_verify_buffers(minor_status
, ctx
, header
,
1100 padding
, trailer
, FALSE
);
1101 if (major_status
!= GSS_S_COMPLETE
) {
1102 return major_status
;
/*
 * Confidentiality path: account for the encrypted copy of the token
 * header plus the enctype's krb5 header/trailer/padding overheads.
 */
1105 if (conf_req_flag
) {
1111 size
+= sizeof(gss_cfx_wrap_token_desc
);
1113 *minor_status
= krb5_crypto_length(context
, ctx
->crypto
,
1114 KRB5_CRYPTO_TYPE_HEADER
,
1117 return GSS_S_FAILURE
;
1119 *minor_status
= krb5_crypto_length(context
, ctx
->crypto
,
1120 KRB5_CRYPTO_TYPE_TRAILER
,
1123 return GSS_S_FAILURE
;
1125 *minor_status
= krb5_crypto_length(context
, ctx
->crypto
,
1126 KRB5_CRYPTO_TYPE_PADDING
,
1129 return GSS_S_FAILURE
;
/* Round `size' up to the enctype pad boundary. */
1132 k5psize
= k5pbase
- (size
% k5pbase
);
1137 if (k5psize
== 0 && IS_DCE_STYLE(ctx
)) {
1138 *minor_status
= krb5_crypto_getblocksize(context
, ctx
->crypto
,
1141 return GSS_S_FAILURE
;
/* GSS header = token + krb5 header; GSS trailer = token copy + EC
 * filler + krb5 trailer. */
1148 gsshsize
= sizeof(gss_cfx_wrap_token_desc
) + k5hsize
;
1149 gsstsize
= sizeof(gss_cfx_wrap_token_desc
) + ec
+ k5tsize
;
/* Integrity-only path: the trailer carries just the checksum. */
1151 *minor_status
= krb5_crypto_length(context
, ctx
->crypto
,
1152 KRB5_CRYPTO_TYPE_CHECKSUM
,
1155 return GSS_S_FAILURE
;
1157 gsshsize
= sizeof(gss_cfx_wrap_token_desc
);
/* With a trailer buffer present, report its size there; otherwise the
 * trailer bytes are folded into the header (rotated via RRC). */
1161 if (trailer
!= NULL
) {
1162 trailer
->buffer
.length
= gsstsize
;
1164 gsshsize
+= gsstsize
;
1167 header
->buffer
.length
= gsshsize
;
1170 /* padding is done via EC and is contained in the header or trailer */
1171 padding
->buffer
.length
= 0;
/* Report the confidentiality that will actually be applied. */
1175 *conf_state
= conf_req_flag
;
1178 return GSS_S_COMPLETE
;
1184 OM_uint32
_gssapi_wrap_cfx(OM_uint32
*minor_status
,
1185 const gsskrb5_ctx ctx
,
1186 krb5_context context
,
1188 const gss_buffer_t input_message_buffer
,
1190 gss_buffer_t output_message_buffer
)
1192 gss_cfx_wrap_token token
;
1193 krb5_error_code ret
;
1196 size_t wrapped_len
, cksumsize
;
1197 uint16_t padlength
, rrc
= 0;
1201 ret
= _gsskrb5cfx_wrap_length_cfx(context
,
1202 ctx
->crypto
, conf_req_flag
,
1204 input_message_buffer
->length
,
1205 &wrapped_len
, &cksumsize
, &padlength
);
1207 *minor_status
= ret
;
1208 return GSS_S_FAILURE
;
1211 /* Always rotate encrypted token (if any) and checksum to header */
1212 rrc
= (conf_req_flag
? sizeof(*token
) : 0) + (uint16_t)cksumsize
;
1214 output_message_buffer
->length
= wrapped_len
;
1215 output_message_buffer
->value
= malloc(output_message_buffer
->length
);
1216 if (output_message_buffer
->value
== NULL
) {
1217 *minor_status
= ENOMEM
;
1218 return GSS_S_FAILURE
;
1221 p
= output_message_buffer
->value
;
1222 token
= (gss_cfx_wrap_token
)p
;
1223 token
->TOK_ID
[0] = 0x05;
1224 token
->TOK_ID
[1] = 0x04;
1226 token
->Filler
= 0xFF;
1227 if ((ctx
->more_flags
& LOCAL
) == 0)
1228 token
->Flags
|= CFXSentByAcceptor
;
1229 if (ctx
->more_flags
& ACCEPTOR_SUBKEY
)
1230 token
->Flags
|= CFXAcceptorSubkey
;
1231 if (conf_req_flag
) {
1233 * In Wrap tokens with confidentiality, the EC field is
1234 * used to encode the size (in bytes) of the random filler.
1236 token
->Flags
|= CFXSealed
;
1237 token
->EC
[0] = (padlength
>> 8) & 0xFF;
1238 token
->EC
[1] = (padlength
>> 0) & 0xFF;
1241 * In Wrap tokens without confidentiality, the EC field is
1242 * used to encode the size (in bytes) of the trailing
1245 * This is not used in the checksum calcuation itself,
1246 * because the checksum length could potentially vary
1247 * depending on the data length.
1254 * In Wrap tokens that provide for confidentiality, the RRC
1255 * field in the header contains the hex value 00 00 before
1258 * In Wrap tokens that do not provide for confidentiality,
1259 * both the EC and RRC fields in the appended checksum
1260 * contain the hex value 00 00 for the purpose of calculating
1266 HEIMDAL_MUTEX_lock(&ctx
->ctx_id_mutex
);
1267 krb5_auth_con_getlocalseqnumber(context
,
1270 _gss_mg_encode_be_uint32(0, &token
->SND_SEQ
[0]);
1271 _gss_mg_encode_be_uint32(seq_number
, &token
->SND_SEQ
[4]);
1272 krb5_auth_con_setlocalseqnumber(context
,
1275 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
1278 * If confidentiality is requested, the token header is
1279 * appended to the plaintext before encryption; the resulting
1280 * token is {"header" | encrypt(plaintext | pad | "header")}.
1282 * If no confidentiality is requested, the checksum is
1283 * calculated over the plaintext concatenated with the
1286 if (ctx
->more_flags
& LOCAL
) {
1287 usage
= KRB5_KU_USAGE_INITIATOR_SEAL
;
1289 usage
= KRB5_KU_USAGE_ACCEPTOR_SEAL
;
1292 if (conf_req_flag
) {
1294 * Any necessary padding is added here to ensure that the
1295 * encrypted token header is always at the end of the
1298 * The specification does not require that the padding
1299 * bytes are initialized.
1301 p
+= sizeof(*token
);
1302 memcpy(p
, input_message_buffer
->value
, input_message_buffer
->length
);
1303 memset(p
+ input_message_buffer
->length
, 0xFF, padlength
);
1304 memcpy(p
+ input_message_buffer
->length
+ padlength
,
1305 token
, sizeof(*token
));
1307 ret
= krb5_encrypt(context
, ctx
->crypto
,
1309 input_message_buffer
->length
+ padlength
+
1313 *minor_status
= ret
;
1314 _gsskrb5_release_buffer(minor_status
, output_message_buffer
);
1315 return GSS_S_FAILURE
;
1317 assert(sizeof(*token
) + cipher
.length
== wrapped_len
);
1318 token
->RRC
[0] = (rrc
>> 8) & 0xFF;
1319 token
->RRC
[1] = (rrc
>> 0) & 0xFF;
1322 * this is really ugly, but needed against windows
1323 * for DCERPC, as windows rotates by EC+RRC.
1325 if (IS_DCE_STYLE(ctx
)) {
1326 ret
= rrc_rotate(cipher
.data
, cipher
.length
, rrc
+padlength
, FALSE
);
1328 ret
= rrc_rotate(cipher
.data
, cipher
.length
, rrc
, FALSE
);
1331 *minor_status
= ret
;
1332 _gsskrb5_release_buffer(minor_status
, output_message_buffer
);
1333 return GSS_S_FAILURE
;
1335 memcpy(p
, cipher
.data
, cipher
.length
);
1336 krb5_data_free(&cipher
);
1341 buf
= malloc(input_message_buffer
->length
+ sizeof(*token
));
1343 *minor_status
= ENOMEM
;
1344 _gsskrb5_release_buffer(minor_status
, output_message_buffer
);
1345 return GSS_S_FAILURE
;
1347 memcpy(buf
, input_message_buffer
->value
, input_message_buffer
->length
);
1348 memcpy(buf
+ input_message_buffer
->length
, token
, sizeof(*token
));
1350 ret
= krb5_create_checksum(context
, ctx
->crypto
,
1352 input_message_buffer
->length
+
1356 *minor_status
= ret
;
1357 _gsskrb5_release_buffer(minor_status
, output_message_buffer
);
1359 return GSS_S_FAILURE
;
1364 assert(cksum
.checksum
.length
== cksumsize
);
1365 token
->EC
[0] = (cksum
.checksum
.length
>> 8) & 0xFF;
1366 token
->EC
[1] = (cksum
.checksum
.length
>> 0) & 0xFF;
1367 token
->RRC
[0] = (rrc
>> 8) & 0xFF;
1368 token
->RRC
[1] = (rrc
>> 0) & 0xFF;
1370 p
+= sizeof(*token
);
1371 memcpy(p
, input_message_buffer
->value
, input_message_buffer
->length
);
1372 memcpy(p
+ input_message_buffer
->length
,
1373 cksum
.checksum
.data
, cksum
.checksum
.length
);
1376 input_message_buffer
->length
+ cksum
.checksum
.length
, rrc
, FALSE
);
1378 *minor_status
= ret
;
1379 _gsskrb5_release_buffer(minor_status
, output_message_buffer
);
1380 free_Checksum(&cksum
);
1381 return GSS_S_FAILURE
;
1383 free_Checksum(&cksum
);
1386 if (conf_state
!= NULL
) {
1387 *conf_state
= conf_req_flag
;
1391 return GSS_S_COMPLETE
;
/*
 * _gssapi_unwrap_cfx -- unwrap an RFC 4121 (CFX) Wrap token.
 *
 * Validates the 16-byte token header (TOK_ID 0x05 0x04, known flags,
 * 0xFF filler), rejects 64-bit sequence numbers (only the low 32 bits
 * are supported), enforces message ordering under the context mutex,
 * then either decrypts the payload (CFXSealed set) or rotates by RRC
 * and verifies the keyed checksum over (plaintext-data | header).
 * On success the plaintext is returned in output_message_buffer.
 *
 * NOTE(review): this chunk is extraction-mangled and several original
 * source lines are missing (local declarations, some call arguments,
 * closing braces); reconcile with upstream Heimdal cfx.c before use.
 */
OM_uint32
_gssapi_unwrap_cfx(OM_uint32
*minor_status
,
1395 const gsskrb5_ctx ctx
,
1396 krb5_context context
,
1397 const gss_buffer_t input_message_buffer
,
1398 gss_buffer_t output_message_buffer
,
1400 gss_qop_t
*qop_state
)
1402 gss_cfx_wrap_token token
;
1404 krb5_error_code ret
;
1408 OM_uint32 seq_number_lo
, seq_number_hi
;
/* Token must at least hold the fixed 16-byte header. */
1414 if (input_message_buffer
->length
< sizeof(*token
)) {
1415 return GSS_S_DEFECTIVE_TOKEN
;
1418 p
= input_message_buffer
->value
;
1420 token
= (gss_cfx_wrap_token
)p
;
1422 if (token
->TOK_ID
[0] != 0x05 || token
->TOK_ID
[1] != 0x04) {
1423 return GSS_S_DEFECTIVE_TOKEN
;
1426 /* Ignore unknown flags */
1427 token_flags
= token
->Flags
&
1428 (CFXSentByAcceptor
| CFXSealed
| CFXAcceptorSubkey
);
/* A token flagged as sent-by-acceptor must arrive at the initiator. */
1430 if (token_flags
& CFXSentByAcceptor
) {
1431 if ((ctx
->more_flags
& LOCAL
) == 0)
1432 return GSS_S_DEFECTIVE_TOKEN
;
/* The acceptor-subkey flag must match the negotiated context state. */
1435 if (ctx
->more_flags
& ACCEPTOR_SUBKEY
) {
1436 if ((token_flags
& CFXAcceptorSubkey
) == 0)
1437 return GSS_S_DEFECTIVE_TOKEN
;
1439 if (token_flags
& CFXAcceptorSubkey
)
1440 return GSS_S_DEFECTIVE_TOKEN
;
1443 if (token
->Filler
!= 0xFF) {
1444 return GSS_S_DEFECTIVE_TOKEN
;
1447 if (conf_state
!= NULL
) {
1448 *conf_state
= (token_flags
& CFXSealed
) ? 1 : 0;
/* EC (extra count) and RRC (right rotation count), 16-bit big-endian. */
1451 ec
= (token
->EC
[0] << 8) | token
->EC
[1];
1452 rrc
= (token
->RRC
[0] << 8) | token
->RRC
[1];
1455 * Check sequence number
1457 _gss_mg_decode_be_uint32(&token
->SND_SEQ
[0], &seq_number_hi
);
1458 _gss_mg_decode_be_uint32(&token
->SND_SEQ
[4], &seq_number_lo
);
1459 if (seq_number_hi
) {
1460 /* no support for 64-bit sequence numbers */
1461 *minor_status
= ERANGE
;
1462 return GSS_S_UNSEQ_TOKEN
;
1465 HEIMDAL_MUTEX_lock(&ctx
->ctx_id_mutex
);
1466 ret
= _gssapi_msg_order_check(ctx
->order
, seq_number_lo
);
1469 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
1470 _gsskrb5_release_buffer(minor_status
, output_message_buffer
);
1473 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
1476 * Decrypt and/or verify checksum
/* Key usage mirrors the peer's sealing direction (RFC 4121 2). */
1479 if (ctx
->more_flags
& LOCAL
) {
1480 usage
= KRB5_KU_USAGE_ACCEPTOR_SEAL
;
1482 usage
= KRB5_KU_USAGE_INITIATOR_SEAL
;
1485 p
+= sizeof(*token
);
1486 len
= input_message_buffer
->length
;
1487 len
-= (p
- (u_char
*)input_message_buffer
->value
);
/* Sealed path: un-rotate, decrypt, then verify the embedded header copy. */
1489 if (token_flags
& CFXSealed
) {
1491 * this is really ugly, but needed against windows
1492 * for DCERPC, as windows rotates by EC+RRC.
1494 if (IS_DCE_STYLE(ctx
)) {
1495 *minor_status
= rrc_rotate(p
, len
, rrc
+ec
, TRUE
);
1497 *minor_status
= rrc_rotate(p
, len
, rrc
, TRUE
);
1499 if (*minor_status
!= 0) {
1500 return GSS_S_FAILURE
;
1503 ret
= krb5_decrypt(context
, ctx
->crypto
, usage
,
1506 *minor_status
= ret
;
1507 return GSS_S_BAD_MIC
;
1510 /* Check that there is room for the pad and token header */
1511 if (data
.length
< ec
+ sizeof(*token
)) {
1512 krb5_data_free(&data
);
1513 return GSS_S_DEFECTIVE_TOKEN
;
1516 p
+= data
.length
- sizeof(*token
);
1518 /* RRC is unprotected; don't modify input buffer */
1519 ((gss_cfx_wrap_token
)p
)->RRC
[0] = token
->RRC
[0];
1520 ((gss_cfx_wrap_token
)p
)->RRC
[1] = token
->RRC
[1];
1522 /* Check the integrity of the header */
1523 if (ct_memcmp(p
, token
, sizeof(*token
)) != 0) {
1524 krb5_data_free(&data
);
1525 return GSS_S_BAD_MIC
;
1528 output_message_buffer
->value
= data
.data
;
1529 output_message_buffer
->length
= data
.length
- ec
- sizeof(*token
);
/* Unsealed path: integrity only; checksum trails the plaintext. */
1533 /* Rotate by RRC; bogus to do this in-place XXX */
1534 *minor_status
= rrc_rotate(p
, len
, rrc
, TRUE
);
1535 if (*minor_status
!= 0) {
1536 return GSS_S_FAILURE
;
1539 /* Determine checksum type */
1540 ret
= krb5_crypto_get_checksum_type(context
,
1544 *minor_status
= ret
;
1545 return GSS_S_FAILURE
;
1548 cksum
.checksum
.length
= ec
;
1550 /* Check we have at least as much data as the checksum */
1551 if (len
< cksum
.checksum
.length
) {
1552 *minor_status
= ERANGE
;
1553 return GSS_S_BAD_MIC
;
1556 /* Length now is of the plaintext only, no checksum */
1557 len
-= cksum
.checksum
.length
;
1558 cksum
.checksum
.data
= p
+ len
;
1560 output_message_buffer
->length
= len
; /* for later */
1561 output_message_buffer
->value
= malloc(len
+ sizeof(*token
));
1562 if (output_message_buffer
->value
== NULL
) {
1563 *minor_status
= ENOMEM
;
1564 return GSS_S_FAILURE
;
1567 /* Checksum is over (plaintext-data | "header") */
1568 memcpy(output_message_buffer
->value
, p
, len
);
1569 memcpy((u_char
*)output_message_buffer
->value
+ len
,
1570 token
, sizeof(*token
));
1572 /* EC is not included in checksum calculation */
1573 token
= (gss_cfx_wrap_token
)((u_char
*)output_message_buffer
->value
+
1580 ret
= krb5_verify_checksum(context
, ctx
->crypto
,
1582 output_message_buffer
->value
,
1583 len
+ sizeof(*token
),
1586 *minor_status
= ret
;
1587 _gsskrb5_release_buffer(minor_status
, output_message_buffer
);
1588 return GSS_S_BAD_MIC
;
1592 if (qop_state
!= NULL
) {
1593 *qop_state
= GSS_C_QOP_DEFAULT
;
1597 return GSS_S_COMPLETE
;
/*
 * _gssapi_mic_cfx -- create an RFC 4121 (CFX) MIC token.
 *
 * Builds the 16-byte MIC header (TOK_ID 0x04 0x04, direction/subkey
 * flags, 0xFF filler, 64-bit send sequence number with zero high part),
 * computes a keyed checksum over (plaintext-data | header), and returns
 * { header | checksum } in message_token (heap-allocated; caller
 * releases).
 *
 * NOTE(review): extraction-mangled chunk; some original lines (buffer
 * allocation, error-path braces) are missing -- reconcile with upstream
 * Heimdal cfx.c before use.
 */
OM_uint32
_gssapi_mic_cfx(OM_uint32
*minor_status
,
1601 const gsskrb5_ctx ctx
,
1602 krb5_context context
,
1604 const gss_buffer_t message_buffer
,
1605 gss_buffer_t message_token
)
1607 gss_cfx_mic_token token
;
1608 krb5_error_code ret
;
/* Checksum input is (plaintext-data | header); build it in one buffer. */
1615 len
= message_buffer
->length
+ sizeof(*token
);
1618 *minor_status
= ENOMEM
;
1619 return GSS_S_FAILURE
;
1622 if (message_buffer
->length
)
1623 memcpy(buf
, message_buffer
->value
, message_buffer
->length
);
1625 memset(buf
, 0, len
);
/* Header goes after the data; Flags starts zeroed (buffer cleared above
 * in the empty-message branch -- NOTE(review): confirm the non-empty
 * branch zeroes the header in the missing lines). */
1627 token
= (gss_cfx_mic_token
)(buf
+ message_buffer
->length
);
1628 token
->TOK_ID
[0] = 0x04;
1629 token
->TOK_ID
[1] = 0x04;
1631 if ((ctx
->more_flags
& LOCAL
) == 0)
1632 token
->Flags
|= CFXSentByAcceptor
;
1633 if (ctx
->more_flags
& ACCEPTOR_SUBKEY
)
1634 token
->Flags
|= CFXAcceptorSubkey
;
1635 memset(token
->Filler
, 0xFF, 5);
/* Fetch and advance the send sequence number under the context mutex. */
1637 HEIMDAL_MUTEX_lock(&ctx
->ctx_id_mutex
);
1638 krb5_auth_con_getlocalseqnumber(context
,
1641 _gss_mg_encode_be_uint32(0, &token
->SND_SEQ
[0]);
1642 _gss_mg_encode_be_uint32(seq_number
, &token
->SND_SEQ
[4]);
1643 krb5_auth_con_setlocalseqnumber(context
,
1646 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
/* Signing key usage follows our side of the context (RFC 4121 2). */
1648 if (ctx
->more_flags
& LOCAL
) {
1649 usage
= KRB5_KU_USAGE_INITIATOR_SIGN
;
1651 usage
= KRB5_KU_USAGE_ACCEPTOR_SIGN
;
1654 ret
= krb5_create_checksum(context
, ctx
->crypto
,
1655 usage
, 0, buf
, len
, &cksum
);
1657 *minor_status
= ret
;
1659 return GSS_S_FAILURE
;
1662 /* Determine MIC length */
1663 message_token
->length
= sizeof(*token
) + cksum
.checksum
.length
;
1664 message_token
->value
= malloc(message_token
->length
);
1665 if (message_token
->value
== NULL
) {
1666 *minor_status
= ENOMEM
;
1667 free_Checksum(&cksum
);
1669 return GSS_S_FAILURE
;
1672 /* Token is { "header" | get_mic("header" | plaintext-data) } */
1673 memcpy(message_token
->value
, token
, sizeof(*token
));
1674 memcpy((u_char
*)message_token
->value
+ sizeof(*token
),
1675 cksum
.checksum
.data
, cksum
.checksum
.length
);
1677 free_Checksum(&cksum
);
1681 return GSS_S_COMPLETE
;
/*
 * _gssapi_verify_mic_cfx -- verify an RFC 4121 (CFX) MIC token.
 *
 * Validates the 16-byte MIC header (TOK_ID 0x04 0x04, known flags,
 * 0xFF filler), rejects 64-bit sequence numbers, enforces message
 * ordering under the context mutex, then verifies the keyed checksum
 * over (plaintext-data | header) using the peer's signing key usage.
 *
 * NOTE(review): extraction-mangled chunk; several original lines
 * (declarations, some call arguments, the closing brace) are missing --
 * reconcile with upstream Heimdal cfx.c before use.
 */
OM_uint32
_gssapi_verify_mic_cfx(OM_uint32
*minor_status
,
1685 const gsskrb5_ctx ctx
,
1686 krb5_context context
,
1687 const gss_buffer_t message_buffer
,
1688 const gss_buffer_t token_buffer
,
1689 gss_qop_t
*qop_state
)
1691 gss_cfx_mic_token token
;
1693 krb5_error_code ret
;
1695 OM_uint32 seq_number_lo
, seq_number_hi
;
/* Token must at least hold the fixed 16-byte header. */
1701 if (token_buffer
->length
< sizeof(*token
)) {
1702 return GSS_S_DEFECTIVE_TOKEN
;
1705 p
= token_buffer
->value
;
1707 token
= (gss_cfx_mic_token
)p
;
1709 if (token
->TOK_ID
[0] != 0x04 || token
->TOK_ID
[1] != 0x04) {
1710 return GSS_S_DEFECTIVE_TOKEN
;
1713 /* Ignore unknown flags */
1714 token_flags
= token
->Flags
& (CFXSentByAcceptor
| CFXAcceptorSubkey
);
/* Direction and acceptor-subkey flags must match the context state. */
1716 if (token_flags
& CFXSentByAcceptor
) {
1717 if ((ctx
->more_flags
& LOCAL
) == 0)
1718 return GSS_S_DEFECTIVE_TOKEN
;
1720 if (ctx
->more_flags
& ACCEPTOR_SUBKEY
) {
1721 if ((token_flags
& CFXAcceptorSubkey
) == 0)
1722 return GSS_S_DEFECTIVE_TOKEN
;
1724 if (token_flags
& CFXAcceptorSubkey
)
1725 return GSS_S_DEFECTIVE_TOKEN
;
/* Constant-time filler compare avoids a timing side channel. */
1728 if (ct_memcmp(token
->Filler
, "\xff\xff\xff\xff\xff", 5) != 0) {
1729 return GSS_S_DEFECTIVE_TOKEN
;
1733 * Check sequence number
1735 _gss_mg_decode_be_uint32(&token
->SND_SEQ
[0], &seq_number_hi
);
1736 _gss_mg_decode_be_uint32(&token
->SND_SEQ
[4], &seq_number_lo
);
1737 if (seq_number_hi
) {
1738 *minor_status
= ERANGE
;
1739 return GSS_S_UNSEQ_TOKEN
;
/* Replay/order check under the context mutex. */
1742 HEIMDAL_MUTEX_lock(&ctx
->ctx_id_mutex
);
1743 ret
= _gssapi_msg_order_check(ctx
->order
, seq_number_lo
);
1746 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
1749 HEIMDAL_MUTEX_unlock(&ctx
->ctx_id_mutex
);
1754 ret
= krb5_crypto_get_checksum_type(context
, ctx
->crypto
,
1757 *minor_status
= ret
;
1758 return GSS_S_FAILURE
;
/* The received checksum is everything after the fixed header. */
1761 cksum
.checksum
.data
= p
+ sizeof(*token
);
1762 cksum
.checksum
.length
= token_buffer
->length
- sizeof(*token
);
/* Verify with the PEER's signing usage (mirror of _gssapi_mic_cfx). */
1764 if (ctx
->more_flags
& LOCAL
) {
1765 usage
= KRB5_KU_USAGE_ACCEPTOR_SIGN
;
1767 usage
= KRB5_KU_USAGE_INITIATOR_SIGN
;
/* Rebuild the signed input: (plaintext-data | header). */
1770 buf
= malloc(message_buffer
->length
+ sizeof(*token
));
1772 *minor_status
= ENOMEM
;
1773 return GSS_S_FAILURE
;
1775 if (message_buffer
->length
)
1776 memcpy(buf
, message_buffer
->value
, message_buffer
->length
);
1777 memcpy(buf
+ message_buffer
->length
, token
, sizeof(*token
));
1779 ret
= krb5_verify_checksum(context
, ctx
->crypto
,
1782 sizeof(*token
) + message_buffer
->length
,
1785 *minor_status
= ret
;
1787 return GSS_S_BAD_MIC
;
1792 if (qop_state
!= NULL
) {
1793 *qop_state
= GSS_C_QOP_DEFAULT
;
1796 return GSS_S_COMPLETE
;