#include <linux/types.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Most of the code is block-size independent but currently we
	 * use only 8: */
	BUG_ON(blocksize != 8);
	return 8 - (length & 7);
}
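
/* Worked example (added for illustration, not from the original source):
 * with blocksize 8 and length 13, 13 & 7 == 5, so gss_krb5_padding()
 * returns 3 and three pad bytes of value 0x03 are appended.  A length
 * already a multiple of 8 gets a full block of eight 0x08 bytes, so the
 * last byte of a padded buffer always records the pad count. */
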
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	/* Padding goes in the tail if there is page data or an existing
	 * tail; otherwise at the end of the head: */
	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];

	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
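
/* Example (illustrative): for buf->len == 100 and offset == 60, the
 * remaining 40 bytes are already 8-aligned, so padding == 8 and eight
 * bytes of value 0x08 are written.  They land in tail[0] whenever page
 * data or a tail is present, so page-cache pages are never written here. */
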
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	/* The last byte of the buffer holds the pad count; find it in
	 * whichever of head, pages, or tail it landed: */
	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;

	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;

	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	buf->len -= pad;
	return 0;
}
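
/* Example trace (illustrative): with head[0].iov_len == 80, page_len ==
 * 4000 and buf->len == 4100, the pad byte sits in the tail: len drops to
 * 4020 after the head, then to 20 after the pages, and pad is read from
 * tail[0].iov_base + 19 before buf->len is shortened by pad. */
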
static void
make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}
/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;

	dprintk("RPC: gss_wrap_kerberos\n");

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
						(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	/* XXX Would be cleverer to encrypt while copying. */
	/* XXX bounds checking, slack, etc. */
	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
	buf->head[0].iov_len += headlen;
	buf->len += headlen;
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + 24;

	*(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(ptr + 4, 0xff, 4);
	*(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);

	make_confounder(msg_start, blocksize);
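
	/* Resulting token layout (summarizing rfc 1964, section 1.2.1,
	 * added for reference):
	 *	ptr[0..1]	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
	 *	ptr[2..3]	SGN_ALG		DES MAC MD5
	 *	ptr[4..5]	SEAL_ALG	DES
	 *	ptr[6..7]	Filler		0xff 0xff
	 *	ptr[8..15]	SND_SEQ		written by krb5_make_seq_num() below
	 *	ptr[16..23]	SGN_CKSUM	trailing 8 bytes of encrypted checksum
	 *	ptr[24..]	confounder, plaintext data, padding
	 */
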
	/* temporarily swap in the scratch pages for checksumming: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum("md5", ptr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			  md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
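
	/* Note (added): this implements DES MAC MD5: the 16-byte digest is
	 * DES-CBC encrypted in place by krb5_encrypt(), and only the final
	 * 8-byte cipher block -- effectively a CBC MAC -- is kept as the
	 * SGN_CKSUM field at ptr + GSS_KRB5_TOK_HDR_LEN. */
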
	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;
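
	/* Note (added, per rfc 1964): SND_SEQ holds the 4-byte sequence
	 * number plus 4 direction bytes (0x00 from the initiator, 0xff from
	 * the acceptor), DES-CBC encrypted using the checksum just written
	 * as the IV; krb5_make_seq_num() emitted that field at ptr + 8. */
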
	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	int			signalg;
	int			sealalg;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;

	dprintk("RPC: gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != SGN_ALG_DES_MAC_MD5)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != SEAL_ALG_DES)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;
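
	/* Note (added): a header passing these checks reads
	 * 02 01 00 00 00 00 ff ff -- TOK_ID 0x0201 with SGN_ALG and
	 * SEAL_ALG both zero and the mandatory 0xff filler.  The algorithm
	 * fields are decoded little-endian; since both accepted values are
	 * zero, byte order happens to be moot here. */
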
	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum("md5", ptr, 8, buf,
			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
		return GSS_S_FAILURE;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
				&direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);
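
	/* Note (added): with DES, data_start skips GSS_KRB5_TOK_HDR_LEN (16)
	 * + 8 checksum bytes + an 8-byte confounder == 32 bytes past ptr;
	 * the memmove() above slides the plaintext back over the krb5 header
	 * so the caller again sees XDR data beginning at offset. */
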
	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}