/*
 * Implement gss_wrap_iov, gss_unwrap_iov for CFX type encryption types.
 * From heimdal: lib/gssapi/mech/gss_aeap.c
 */
/*
 * AEAD support
 */

#include "mech_locl.h"
7 /**
8 * Encrypts or sign the data.
10 * This is a more complicated version of gss_wrap(), it allows the
11 * caller to use AEAD data (signed header/trailer) and allow greater
12 * controll over where the encrypted data is placed.
14 * The maximum packet size is gss_context_stream_sizes.max_msg_size.
16 * The caller needs provide the folloing buffers when using in conf_req_flag=1 mode:
18 * - HEADER (of size gss_context_stream_sizes.header)
19 * { DATA or SIGN_ONLY } (optional, zero or more)
20 * PADDING (of size gss_context_stream_sizes.blocksize, if zero padding is zero, can be omitted)
21 * TRAILER (of size gss_context_stream_sizes.trailer)
23 * - on DCE-RPC mode, the caller can skip PADDING and TRAILER if the
24 * DATA elements is padded to a block bountry and header is of at
25 * least size gss_context_stream_sizes.header + gss_context_stream_sizes.trailer.
27 * HEADER, PADDING, TRAILER will be shrunken to the size required to transmit any of them too large.
29 * To generate gss_wrap() compatible packets, use: HEADER | DATA | PADDING | TRAILER
31 * When used in conf_req_flag=0,
33 * - HEADER (of size gss_context_stream_sizes.header)
34 * { DATA or SIGN_ONLY } (optional, zero or more)
35 * PADDING (of size gss_context_stream_sizes.blocksize, if zero padding is zero, can be omitted)
36 * TRAILER (of size gss_context_stream_sizes.trailer)
39 * The input sizes of HEADER, PADDING and TRAILER can be fetched using gss_wrap_iov_length() or
40 * gss_context_query_attributes().
42 * @ingroup gssapi
46 OM_uint32 GSSAPI_LIB_FUNCTION
47 gss_wrap_iov(OM_uint32 * minor_status,
48 gss_ctx_id_t context_handle,
49 int conf_req_flag,
50 gss_qop_t qop_req,
51 int * conf_state,
52 gss_iov_buffer_desc *iov,
53 int iov_count)
55 struct _gss_context *ctx = (struct _gss_context *) context_handle;
56 gssapi_mech_interface m;
58 if (minor_status)
59 *minor_status = 0;
60 if (conf_state)
61 *conf_state = 0;
62 if (ctx == NULL)
63 return GSS_S_NO_CONTEXT;
64 if (iov == NULL && iov_count != 0)
65 return GSS_S_CALL_INACCESSIBLE_READ;
67 m = ctx->gc_mech;
69 if (m->gm_wrap_iov == NULL) {
70 if (minor_status)
71 *minor_status = 0;
72 return GSS_S_UNAVAILABLE;
75 return (m->gm_wrap_iov)(minor_status, ctx->gc_ctx,
76 conf_req_flag, qop_req, conf_state,
77 iov, iov_count);
80 /**
81 * Decrypt or verifies the signature on the data.
84 * @ingroup gssapi
87 OM_uint32 GSSAPI_LIB_FUNCTION
88 gss_unwrap_iov(OM_uint32 *minor_status,
89 gss_ctx_id_t context_handle,
90 int *conf_state,
91 gss_qop_t *qop_state,
92 gss_iov_buffer_desc *iov,
93 int iov_count)
95 struct _gss_context *ctx = (struct _gss_context *) context_handle;
96 gssapi_mech_interface m;
98 if (minor_status)
99 *minor_status = 0;
100 if (conf_state)
101 *conf_state = 0;
102 if (qop_state)
103 *qop_state = 0;
104 if (ctx == NULL)
105 return GSS_S_NO_CONTEXT;
106 if (iov == NULL && iov_count != 0)
107 return GSS_S_CALL_INACCESSIBLE_READ;
109 m = ctx->gc_mech;
111 if (m->gm_unwrap_iov == NULL) {
112 *minor_status = 0;
113 return GSS_S_UNAVAILABLE;
116 return (m->gm_unwrap_iov)(minor_status, ctx->gc_ctx,
117 conf_state, qop_state,
118 iov, iov_count);
122 * Update the length fields in iov buffer for the types:
123 * - GSS_IOV_BUFFER_TYPE_HEADER
124 * - GSS_IOV_BUFFER_TYPE_PADDING
125 * - GSS_IOV_BUFFER_TYPE_TRAILER
127 * Consider using gss_context_query_attributes() to fetch the data instead.
129 * @ingroup gssapi
132 OM_uint32 GSSAPI_LIB_FUNCTION
133 gss_wrap_iov_length(OM_uint32 * minor_status,
134 gss_ctx_id_t context_handle,
135 int conf_req_flag,
136 gss_qop_t qop_req,
137 int *conf_state,
138 gss_iov_buffer_desc *iov,
139 int iov_count)
141 struct _gss_context *ctx = (struct _gss_context *) context_handle;
142 gssapi_mech_interface m;
144 if (minor_status)
145 *minor_status = 0;
146 if (conf_state)
147 *conf_state = 0;
148 if (ctx == NULL)
149 return GSS_S_NO_CONTEXT;
150 if (iov == NULL && iov_count != 0)
151 return GSS_S_CALL_INACCESSIBLE_READ;
153 m = ctx->gc_mech;
155 if (m->gm_wrap_iov_length == NULL) {
156 *minor_status = 0;
157 return GSS_S_UNAVAILABLE;
160 return (m->gm_wrap_iov_length)(minor_status, ctx->gc_ctx,
161 conf_req_flag, qop_req, conf_state,
162 iov, iov_count);
166 * Free all buffer allocated by gss_wrap_iov() or gss_unwrap_iov() by
167 * looking at the GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATED flag.
169 * @ingroup gssapi
172 OM_uint32 GSSAPI_LIB_FUNCTION
173 gss_release_iov_buffer(OM_uint32 *minor_status,
174 gss_iov_buffer_desc *iov,
175 int iov_count)
177 OM_uint32 junk;
178 size_t i;
180 if (minor_status)
181 *minor_status = 0;
182 if (iov == NULL && iov_count != 0)
183 return GSS_S_CALL_INACCESSIBLE_READ;
185 for (i = 0; i < iov_count; i++) {
186 if ((iov[i].type & GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATED) == 0)
187 continue;
188 gss_release_buffer(&junk, &iov[i].buffer);
189 iov[i].type &= ~GSS_IOV_BUFFER_TYPE_FLAG_ALLOCATED;
191 return GSS_S_COMPLETE;
195 * Query the context for parameters.
197 * SSPI equivalent if this function is QueryContextAttributes.
199 * - GSS_C_ATTR_STREAM_SIZES data is a gss_context_stream_sizes.
201 * @ingroup gssapi
204 static gss_OID_desc gss_c_attr_stream_sizes_desc =
205 {10, rk_UNCONST("\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x03")};
207 gss_OID GSSAPI_LIB_VARIABLE GSS_C_ATTR_STREAM_SIZES =
208 &gss_c_attr_stream_sizes_desc;
210 OM_uint32 GSSAPI_LIB_FUNCTION
211 gss_context_query_attributes(OM_uint32 *minor_status,
212 gss_OID attribute,
213 void *data,
214 size_t len)
216 *minor_status = 0;
218 if (gss_oid_equal(GSS_C_ATTR_STREAM_SIZES, attribute)) {
219 memset(data, 0, len);
220 return GSS_S_COMPLETE;
223 return GSS_S_FAILURE;