/*
 * AEAD support
 */

#include "mech_locl.h"

/**
 * Encrypts or signs the data.
 *
 * This is a more complicated version of gss_wrap(); it allows the
 * caller to use AEAD data (signed header/trailer) and gives greater
 * control over where the encrypted data is placed.
 *
 * The maximum packet size is gss_context_stream_sizes.max_msg_size.
 *
 * The caller needs to provide the following buffers when using
 * conf_req_flag=1 mode:
 *
 * - HEADER (of size gss_context_stream_sizes.header)
 *   { DATA or SIGN_ONLY } (optional, zero or more)
 *   PADDING (of size gss_context_stream_sizes.blocksize; if the padding is zero it can be omitted)
 *   TRAILER (of size gss_context_stream_sizes.trailer)
 *
 * - in DCE-RPC mode, the caller can skip PADDING and TRAILER if the
 *   DATA elements are padded to a block boundary and HEADER is of at
 *   least size gss_context_stream_sizes.header + gss_context_stream_sizes.trailer.
 *
 * HEADER, PADDING and TRAILER will be shrunk to the size required to
 * transmit the message if any of them is larger than needed.
 *
 * To generate gss_wrap() compatible packets, use: HEADER | DATA | PADDING | TRAILER
 *
 * When used with conf_req_flag=0, the same layout applies:
 *
 * - HEADER (of size gss_context_stream_sizes.header)
 *   { DATA or SIGN_ONLY } (optional, zero or more)
 *   PADDING (of size gss_context_stream_sizes.blocksize; if the padding is zero it can be omitted)
 *   TRAILER (of size gss_context_stream_sizes.trailer)
 *
 * The input sizes of HEADER, PADDING and TRAILER can be fetched using
 * gss_wrap_iov_length() or gss_context_query_attributes().  A short
 * usage sketch follows the function body below.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_wrap_iov(OM_uint32 * minor_status,
             gss_ctx_id_t context_handle,
             int conf_req_flag,
             gss_qop_t qop_req,
             int * conf_state,
             gss_iov_buffer_desc *iov,
             int iov_count)
{
    struct _gss_context *ctx = (struct _gss_context *) context_handle;
    gssapi_mech_interface m;

    if (minor_status)
        *minor_status = 0;
    if (conf_state)
        *conf_state = 0;
    if (ctx == NULL)
        return GSS_S_NO_CONTEXT;
    if (iov == NULL && iov_count != 0)
        return GSS_S_CALL_INACCESSIBLE_READ;

    m = ctx->gc_mech;

    if (m->gm_wrap_iov == NULL)
        return GSS_S_UNAVAILABLE;

    return (m->gm_wrap_iov)(minor_status, ctx->gc_ctx,
                            conf_req_flag, qop_req, conf_state,
                            iov, iov_count);
}
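
/*
 * A minimal usage sketch of the layout described above, assuming the
 * caller already holds an established context handle "ctx" and a
 * plaintext "data"/"data_len" (hypothetical names):
 *
 *   OM_uint32 maj, min;
 *   int conf;
 *   gss_iov_buffer_desc iov[4];
 *
 *   memset(iov, 0, sizeof(iov));
 *   iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;
 *   iov[1].type = GSS_IOV_BUFFER_TYPE_DATA;
 *   iov[1].buffer.value  = data;
 *   iov[1].buffer.length = data_len;
 *   iov[2].type = GSS_IOV_BUFFER_TYPE_PADDING;
 *   iov[3].type = GSS_IOV_BUFFER_TYPE_TRAILER;
 *
 *   maj = gss_wrap_iov_length(&min, ctx, 1, GSS_C_QOP_DEFAULT,
 *                             &conf, iov, 4);
 *   (allocate iov[0], iov[2] and iov[3] buffers of the returned lengths)
 *   maj = gss_wrap_iov(&min, ctx, 1, GSS_C_QOP_DEFAULT, &conf, iov, 4);
 *
 * Concatenating HEADER | DATA | PADDING | TRAILER then yields a token
 * that a gss_unwrap() peer can consume.
 */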

/**
 * Decrypts the data or verifies its signature.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_unwrap_iov(OM_uint32 *minor_status,
               gss_ctx_id_t context_handle,
               int *conf_state,
               gss_qop_t *qop_state,
               gss_iov_buffer_desc *iov,
               int iov_count)
{
    struct _gss_context *ctx = (struct _gss_context *) context_handle;
    gssapi_mech_interface m;

    if (minor_status)
        *minor_status = 0;
    if (conf_state)
        *conf_state = 0;
    if (qop_state)
        *qop_state = 0;
    if (ctx == NULL)
        return GSS_S_NO_CONTEXT;
    if (iov == NULL && iov_count != 0)
        return GSS_S_CALL_INACCESSIBLE_READ;

    m = ctx->gc_mech;

    if (m->gm_unwrap_iov == NULL)
        return GSS_S_UNAVAILABLE;

    return (m->gm_unwrap_iov)(minor_status, ctx->gc_ctx,
                              conf_state, qop_state,
                              iov, iov_count);
}
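
/*
 * A minimal usage sketch for the receiving side, assuming a complete
 * wrap token "token" (a gss_buffer_desc, hypothetical name) and the
 * same context handle "ctx" as above:
 *
 *   OM_uint32 maj, min, junk;
 *   int conf;
 *   gss_qop_t qop;
 *   gss_iov_buffer_desc iov[2];
 *
 *   memset(iov, 0, sizeof(iov));
 *   iov[0].type   = GSS_IOV_BUFFER_TYPE_STREAM;
 *   iov[0].buffer = token;
 *   iov[1].type   = GSS_IOV_BUFFER_TYPE_DATA | GSS_IOV_BUFFER_FLAG_ALLOCATE;
 *
 *   maj = gss_unwrap_iov(&min, ctx, &conf, &qop, iov, 2);
 *
 * On success iov[1].buffer holds the decrypted data; release it with
 * gss_release_iov_buffer(&junk, &iov[1], 1) when done.
 */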

/**
 * Update the length fields in the iov buffers for the types:
 * - GSS_IOV_BUFFER_TYPE_HEADER
 * - GSS_IOV_BUFFER_TYPE_PADDING
 * - GSS_IOV_BUFFER_TYPE_TRAILER
 *
 * Consider using gss_context_query_attributes() to fetch the data instead.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_wrap_iov_length(OM_uint32 * minor_status,
                    gss_ctx_id_t context_handle,
                    int conf_req_flag,
                    gss_qop_t qop_req,
                    int *conf_state,
                    gss_iov_buffer_desc *iov,
                    int iov_count)
{
    struct _gss_context *ctx = (struct _gss_context *) context_handle;
    gssapi_mech_interface m;

    if (minor_status)
        *minor_status = 0;
    if (conf_state)
        *conf_state = 0;
    if (ctx == NULL)
        return GSS_S_NO_CONTEXT;
    if (iov == NULL && iov_count != 0)
        return GSS_S_CALL_INACCESSIBLE_READ;

    m = ctx->gc_mech;

    if (m->gm_wrap_iov_length == NULL)
        return GSS_S_UNAVAILABLE;

    return (m->gm_wrap_iov_length)(minor_status, ctx->gc_ctx,
                                   conf_req_flag, qop_req, conf_state,
                                   iov, iov_count);
}
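
/*
 * The returned lengths can also be used to size one contiguous token,
 * which is the approach gss_wrap_aead() below takes; a sketch, assuming
 * "iov"/"iov_count" prepared as for gss_wrap_iov():
 *
 *   size_t i, total = 0;
 *
 *   for (i = 0; i < iov_count; i++) {
 *       if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
 *           continue;
 *       total += iov[i].buffer.length;
 *   }
 *
 * Allocate "total" bytes and point each non-SIGN_ONLY iov[i].buffer.value
 * into that allocation before calling gss_wrap_iov().
 */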

/**
 * Free all buffers allocated by gss_wrap_iov() or gss_unwrap_iov() by
 * looking at the GSS_IOV_BUFFER_FLAG_ALLOCATED flag.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_release_iov_buffer(OM_uint32 *minor_status,
                       gss_iov_buffer_desc *iov,
                       int iov_count)
{
    OM_uint32 junk;
    int i;

    if (minor_status)
        *minor_status = 0;
    if (iov == NULL && iov_count != 0)
        return GSS_S_CALL_INACCESSIBLE_READ;

    for (i = 0; i < iov_count; i++) {
        if ((iov[i].type & GSS_IOV_BUFFER_FLAG_ALLOCATED) == 0)
            continue;
        gss_release_buffer(&junk, &iov[i].buffer);
        iov[i].type &= ~GSS_IOV_BUFFER_FLAG_ALLOCATED;
    }

    return GSS_S_COMPLETE;
}

/**
 * Query the context for parameters.
 *
 * The SSPI equivalent of this function is QueryContextAttributes().
 *
 * - GSS_C_ATTR_STREAM_SIZES: the data argument is a gss_context_stream_sizes.
 *
 * @ingroup gssapi
 */

gss_OID_desc GSSAPI_LIB_VARIABLE __gss_c_attr_stream_sizes_oid_desc =
    {10, rk_UNCONST("\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x03")};

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_context_query_attributes(OM_uint32 *minor_status,
                             gss_const_ctx_id_t context_handle,
                             const gss_OID attribute,
                             void *data,
                             size_t len)
{
    if (minor_status)
        *minor_status = 0;

    if (gss_oid_equal(GSS_C_ATTR_STREAM_SIZES, attribute)) {
        memset(data, 0, len);
        return GSS_S_COMPLETE;
    }

    return GSS_S_FAILURE;
}
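
/*
 * A minimal sketch of fetching the per-message sizes, assuming an
 * established context handle "ctx" (hypothetical name):
 *
 *   OM_uint32 maj, min;
 *   gss_context_stream_sizes sizes;
 *
 *   maj = gss_context_query_attributes(&min, ctx, GSS_C_ATTR_STREAM_SIZES,
 *                                      &sizes, sizeof(sizes));
 *
 * The sizes structure then carries the header, blocksize, trailer and
 * max_msg_size fields referred to in the gss_wrap_iov() documentation
 * above.
 */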

/**
 * AEAD wrap API for a single piece of associated data, for compatibility
 * with MIT and as specified by draft-howard-gssapi-aead-00.txt.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_wrap_aead(OM_uint32 *minor_status,
              gss_ctx_id_t context_handle,
              int conf_req_flag,
              gss_qop_t qop_req,
              gss_buffer_t input_assoc_buffer,
              gss_buffer_t input_payload_buffer,
              int *conf_state,
              gss_buffer_t output_message_buffer)
{
    OM_uint32 major_status, tmp, flags = 0;
    gss_iov_buffer_desc iov[5];
    size_t i;
    unsigned char *p;

    memset(iov, 0, sizeof(iov));

    iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;

    iov[1].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
    if (input_assoc_buffer)
        iov[1].buffer = *input_assoc_buffer;

    iov[2].type = GSS_IOV_BUFFER_TYPE_DATA;
    if (input_payload_buffer)
        iov[2].buffer.length = input_payload_buffer->length;

    gss_inquire_context(minor_status, context_handle, NULL, NULL,
                        NULL, NULL, &flags, NULL, NULL);

    /* krb5 mech rejects padding/trailer if DCE-style is set */
    iov[3].type = (flags & GSS_C_DCE_STYLE) ? GSS_IOV_BUFFER_TYPE_EMPTY
                                            : GSS_IOV_BUFFER_TYPE_PADDING;
    iov[4].type = (flags & GSS_C_DCE_STYLE) ? GSS_IOV_BUFFER_TYPE_EMPTY
                                            : GSS_IOV_BUFFER_TYPE_TRAILER;

    major_status = gss_wrap_iov_length(minor_status, context_handle,
                                       conf_req_flag, qop_req, conf_state,
                                       iov, 5);
    if (GSS_ERROR(major_status))
        return major_status;

    for (i = 0, output_message_buffer->length = 0; i < 5; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            continue;

        output_message_buffer->length += iov[i].buffer.length;
    }

    output_message_buffer->value = malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }

    for (i = 0, p = output_message_buffer->value; i < 5; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            continue;
        else if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA)
            memcpy(p, input_payload_buffer->value, input_payload_buffer->length);

        iov[i].buffer.value = p;
        p += iov[i].buffer.length;
    }

    major_status = gss_wrap_iov(minor_status, context_handle, conf_req_flag,
                                qop_req, conf_state, iov, 5);
    if (GSS_ERROR(major_status))
        gss_release_buffer(&tmp, output_message_buffer);

    return major_status;
}
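
/*
 * A minimal sketch of the single-shot AEAD wrap, assuming hypothetical
 * buffers "assoc" (associated, signed-only data) and "plaintext", plus
 * the usual context handle "ctx":
 *
 *   OM_uint32 maj, min;
 *   int conf;
 *   gss_buffer_desc token;
 *
 *   maj = gss_wrap_aead(&min, ctx, 1, GSS_C_QOP_DEFAULT,
 *                       &assoc, &plaintext, &conf, &token);
 *
 * On success "token" holds the wrapped message (the associated data is
 * integrity-protected but not transmitted); release it with
 * gss_release_buffer() when done.
 */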

/**
 * AEAD unwrap for a single piece of associated data, for compatibility
 * with MIT and as specified by draft-howard-gssapi-aead-00.txt.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_unwrap_aead(OM_uint32 *minor_status,
                gss_ctx_id_t context_handle,
                gss_buffer_t input_message_buffer,
                gss_buffer_t input_assoc_buffer,
                gss_buffer_t output_payload_buffer,
                int *conf_state,
                gss_qop_t *qop_state)
{
    OM_uint32 major_status, tmp;
    gss_iov_buffer_desc iov[3];

    memset(iov, 0, sizeof(iov));

    iov[0].type = GSS_IOV_BUFFER_TYPE_STREAM;
    iov[0].buffer = *input_message_buffer;

    iov[1].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
    if (input_assoc_buffer)
        iov[1].buffer = *input_assoc_buffer;

    iov[2].type = GSS_IOV_BUFFER_TYPE_DATA | GSS_IOV_BUFFER_FLAG_ALLOCATE;

    major_status = gss_unwrap_iov(minor_status, context_handle, conf_state,
                                  qop_state, iov, 3);
    if (GSS_ERROR(major_status))
        gss_release_iov_buffer(&tmp, &iov[2], 1);
    else
        *output_payload_buffer = iov[2].buffer;

    return major_status;
}
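
/*
 * And the matching single-shot unwrap, assuming the same hypothetical
 * "ctx", "token" and "assoc" names as in the gss_wrap_aead() sketch:
 *
 *   OM_uint32 maj, min;
 *   int conf;
 *   gss_qop_t qop;
 *   gss_buffer_desc payload;
 *
 *   maj = gss_unwrap_aead(&min, ctx, &token, &assoc,
 *                         &payload, &conf, &qop);
 *
 * On success "payload" holds the decrypted data; release it with
 * gss_release_buffer() when done.
 */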