/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  $Id$
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
static struct rpc_authops authgss_ops;

static struct rpc_credops gss_credops;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

#define NFS_NGROUPS     16

#define GSS_CRED_EXPIRE         (60 * HZ)       /* XXX: reasonable? */
#define GSS_CRED_SLACK          1024            /* XXX: unused */
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK          100

/* XXX this define must match the gssd define
 * as it is passed to gssd to signal the use of
 * machine creds should be part of the shared rpc interface */

#define CA_RUN_AS_MACHINE  0x00000200

/* dump the buffer in `emacs-hexl' style */
#define isprint(c)      ((c > 0x1f) && (c < 0x7f))

static DEFINE_RWLOCK(gss_ctx_lock);
struct gss_auth {
        struct rpc_auth rpc_auth;
        struct gss_api_mech *mech;
        enum rpc_gss_svc service;
        struct list_head upcalls;
        struct rpc_clnt *client;
        struct dentry *dentry;
        spinlock_t lock;
};

static void gss_destroy_ctx(struct gss_cl_ctx *);
static struct rpc_pipe_ops gss_upcall_ops;
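/*
 * A gss_cred starts out without a GSS context.  gss_refresh_upcall() (for
 * asynchronous RPC tasks) and gss_create_upcall() (for synchronous cred
 * initialisation) queue an upcall message carrying the uid on the
 * rpc_pipefs pipe; the userspace gssd daemon negotiates the context and
 * writes it back, and gss_pipe_downcall() installs it on the cred via
 * gss_cred_set_ctx().
 */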
static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
        atomic_inc(&ctx->count);
        return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
        if (atomic_dec_and_test(&ctx->count))
                gss_destroy_ctx(ctx);
}

static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_cl_ctx *old;
        write_lock(&gss_ctx_lock);
        old = gss_cred->gc_ctx;
        gss_cred->gc_ctx = ctx;
        cred->cr_flags |= RPCAUTH_CRED_UPTODATE;
        cred->cr_flags &= ~RPCAUTH_CRED_NEW;
        write_unlock(&gss_ctx_lock);
        if (old)
                gss_put_ctx(old);
}

static int
gss_cred_is_uptodate_ctx(struct rpc_cred *cred)
{
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        int res = 0;

        read_lock(&gss_ctx_lock);
        if ((cred->cr_flags & RPCAUTH_CRED_UPTODATE) && gss_cred->gc_ctx)
                res = 1;
        read_unlock(&gss_ctx_lock);
        return res;
}
static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
        const void *q = (const void *)((const char *)p + len);
        if (unlikely(q > end || q < p))
                return ERR_PTR(-EFAULT);
        memcpy(res, p, len);
        return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
        const void *q;
        unsigned int len;

        p = simple_get_bytes(p, end, &len, sizeof(len));
        if (IS_ERR(p))
                return p;
        q = (const void *)((const char *)p + len);
        if (unlikely(q > end || q < p))
                return ERR_PTR(-EFAULT);
        dest->data = kmemdup(p, len, GFP_KERNEL);
        if (unlikely(dest->data == NULL))
                return ERR_PTR(-ENOMEM);
        dest->len = len;
        return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_cl_ctx *ctx = NULL;

        read_lock(&gss_ctx_lock);
        if (gss_cred->gc_ctx)
                ctx = gss_get_ctx(gss_cred->gc_ctx);
        read_unlock(&gss_ctx_lock);
        return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
        struct gss_cl_ctx *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx != NULL) {
                ctx->gc_proc = RPC_GSS_PROC_DATA;
                ctx->gc_seq = 1;        /* NetApp 6.4R1 doesn't accept seq. no. 0 */
                spin_lock_init(&ctx->gc_seq_lock);
                atomic_set(&ctx->count,1);
        }
        return ctx;
}
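/*
 * Layout of the downcall message written by gssd, as parsed by
 * gss_pipe_downcall() and gss_fill_context() below: the uid, then the
 * context lifetime in seconds, the sequence window size, the opaque
 * on-the-wire context handle, and finally the length-prefixed
 * mechanism-specific security context handed to gss_import_sec_context().
 */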
#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
        const void *q;
        unsigned int seclen;
        unsigned int timeout;
        u32 window_size;
        int ret;

        /* First unsigned int gives the lifetime (in seconds) of the cred */
        p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
        if (IS_ERR(p))
                goto err;
        if (timeout == 0)
                timeout = GSSD_MIN_TIMEOUT;
        ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
        /* Sequence number window. Determines the maximum number of simultaneous requests */
        p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
        if (IS_ERR(p))
                goto err;
        ctx->gc_win = window_size;
        /* gssd signals an error by passing ctx->gc_win = 0: */
        if (ctx->gc_win == 0) {
                /* in which case, p points to an error code which we ignore */
                p = ERR_PTR(-EACCES);
                goto err;
        }
        /* copy the opaque wire context */
        p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
        if (IS_ERR(p))
                goto err;
        /* import the opaque security context */
        p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
        if (IS_ERR(p))
                goto err;
        q = (const void *)((const char *)p + seclen);
        if (unlikely(q > end || q < p)) {
                p = ERR_PTR(-EFAULT);
                goto err;
        }
        ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
        if (ret < 0) {
                p = ERR_PTR(ret);
                goto err;
        }
        return q;
err:
        dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p));
        return p;
}
struct gss_upcall_msg {
        atomic_t count;
        uid_t   uid;
        struct rpc_pipe_msg msg;
        struct list_head list;
        struct gss_auth *auth;
        struct rpc_wait_queue rpc_waitqueue;
        wait_queue_head_t waitqueue;
        struct gss_cl_ctx *ctx;
};

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
        if (!atomic_dec_and_test(&gss_msg->count))
                return;
        BUG_ON(!list_empty(&gss_msg->list));
        if (gss_msg->ctx != NULL)
                gss_put_ctx(gss_msg->ctx);
        kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct gss_auth *gss_auth, uid_t uid)
{
        struct gss_upcall_msg *pos;
        list_for_each_entry(pos, &gss_auth->upcalls, list) {
                if (pos->uid != uid)
                        continue;
                atomic_inc(&pos->count);
                dprintk("RPC: gss_find_upcall found msg %p\n", pos);
                return pos;
        }
        dprintk("RPC: gss_find_upcall found nothing\n");
        return NULL;
}
/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
{
        struct gss_upcall_msg *old;

        spin_lock(&gss_auth->lock);
        old = __gss_find_upcall(gss_auth, gss_msg->uid);
        if (old == NULL) {
                atomic_inc(&gss_msg->count);
                list_add(&gss_msg->list, &gss_auth->upcalls);
        } else
                gss_msg = old;
        spin_unlock(&gss_auth->lock);
        return gss_msg;
}
static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
        if (list_empty(&gss_msg->list))
                return;
        list_del_init(&gss_msg->list);
        rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
        wake_up_all(&gss_msg->waitqueue);
        atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
        struct gss_auth *gss_auth = gss_msg->auth;

        spin_lock(&gss_auth->lock);
        __gss_unhash_msg(gss_msg);
        spin_unlock(&gss_auth->lock);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
        struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
                        struct gss_cred, gc_base);
        struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;

        BUG_ON(gss_msg == NULL);
        if (gss_msg->ctx)
                gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx));
        else
                task->tk_status = gss_msg->msg.errno;
        spin_lock(&gss_msg->auth->lock);
        gss_cred->gc_upcall = NULL;
        rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
        spin_unlock(&gss_msg->auth->lock);
        gss_release_msg(gss_msg);
}
static inline struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
{
        struct gss_upcall_msg *gss_msg;

        gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
        if (gss_msg != NULL) {
                INIT_LIST_HEAD(&gss_msg->list);
                rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
                init_waitqueue_head(&gss_msg->waitqueue);
                atomic_set(&gss_msg->count, 1);
                gss_msg->msg.data = &gss_msg->uid;
                gss_msg->msg.len = sizeof(gss_msg->uid);
                gss_msg->uid = uid;
                gss_msg->auth = gss_auth;
        }
        return gss_msg;
}

static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
        struct gss_upcall_msg *gss_new, *gss_msg;

        gss_new = gss_alloc_msg(gss_auth, cred->cr_uid);
        if (gss_new == NULL)
                return ERR_PTR(-ENOMEM);
        gss_msg = gss_add_msg(gss_auth, gss_new);
        if (gss_msg == gss_new) {
                int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg);
                if (res) {
                        gss_unhash_msg(gss_new);
                        gss_msg = ERR_PTR(res);
                }
        } else
                gss_release_msg(gss_new);
        return gss_msg;
}
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_auth *gss_auth = container_of(task->tk_client->cl_auth,
                        struct gss_auth, rpc_auth);
        struct gss_cred *gss_cred = container_of(cred,
                        struct gss_cred, gc_base);
        struct gss_upcall_msg *gss_msg;
        int err = 0;

        dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
                        cred->cr_uid);
        gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
        if (IS_ERR(gss_msg)) {
                err = PTR_ERR(gss_msg);
                goto out;
        }
        spin_lock(&gss_auth->lock);
        if (gss_cred->gc_upcall != NULL)
                rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL);
        else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
                task->tk_timeout = 0;
                gss_cred->gc_upcall = gss_msg;
                /* gss_upcall_callback will release the reference to gss_upcall_msg */
                atomic_inc(&gss_msg->count);
                rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL);
        } else
                err = gss_msg->msg.errno;
        spin_unlock(&gss_auth->lock);
        gss_release_msg(gss_msg);
out:
        dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
                        task->tk_pid, cred->cr_uid, err);
        return err;
}
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
        struct rpc_cred *cred = &gss_cred->gc_base;
        struct gss_upcall_msg *gss_msg;
        DEFINE_WAIT(wait);
        int err = 0;

        dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
        gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
        if (IS_ERR(gss_msg)) {
                err = PTR_ERR(gss_msg);
                goto out;
        }
        for (;;) {
                prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
                spin_lock(&gss_auth->lock);
                if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
                        spin_unlock(&gss_auth->lock);
                        break;
                }
                spin_unlock(&gss_auth->lock);
                if (signalled()) {
                        err = -ERESTARTSYS;
                        goto out_intr;
                }
                schedule();
        }
        if (gss_msg->ctx)
                gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx));
        else
                err = gss_msg->msg.errno;
out_intr:
        finish_wait(&gss_msg->waitqueue, &wait);
        gss_release_msg(gss_msg);
out:
        dprintk("RPC: gss_create_upcall for uid %u result %d\n",
                        cred->cr_uid, err);
        return err;
}
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
                char __user *dst, size_t buflen)
{
        char *data = (char *)msg->data + msg->copied;
        ssize_t mlen = msg->len;
        ssize_t left;

        if (mlen > buflen)
                mlen = buflen;
        left = copy_to_user(dst, data, mlen);
        if (left < 0) {
                msg->errno = left;
                return left;
        }
        mlen -= left;
        msg->copied += mlen;
        msg->errno = 0;
        return mlen;
}
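/*
 * gss_pipe_downcall() consumes gssd's reply.  A reply with a zero sequence
 * window is treated as a failed negotiation (gss_fill_context() returns
 * -EACCES); the error, or the newly imported context, is handed to whichever
 * upcall message is still hashed for that uid, waking any waiting tasks.
 */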
#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
        const void *p, *end;
        void *buf;
        struct rpc_clnt *clnt;
        struct gss_auth *gss_auth;
        struct rpc_cred *cred;
        struct gss_upcall_msg *gss_msg;
        struct gss_cl_ctx *ctx;
        uid_t uid;
        int err = -EFBIG;

        if (mlen > MSG_BUF_MAXSIZE)
                goto out;
        err = -ENOMEM;
        buf = kmalloc(mlen, GFP_KERNEL);
        if (!buf)
                goto out;

        clnt = RPC_I(filp->f_path.dentry->d_inode)->private;
        err = -EFAULT;
        if (copy_from_user(buf, src, mlen))
                goto err;

        end = (const void *)((char *)buf + mlen);
        p = simple_get_bytes(buf, end, &uid, sizeof(uid));
        if (IS_ERR(p)) {
                err = PTR_ERR(p);
                goto err;
        }

        err = -ENOMEM;
        ctx = gss_alloc_context();
        if (ctx == NULL)
                goto err;
        err = 0;
        gss_auth = container_of(clnt->cl_auth, struct gss_auth, rpc_auth);
        p = gss_fill_context(p, end, ctx, gss_auth->mech);
        if (IS_ERR(p)) {
                err = PTR_ERR(p);
                if (err != -EACCES)
                        goto err_put_ctx;
        }
        spin_lock(&gss_auth->lock);
        gss_msg = __gss_find_upcall(gss_auth, uid);
        if (gss_msg) {
                if (err == 0 && gss_msg->ctx == NULL)
                        gss_msg->ctx = gss_get_ctx(ctx);
                gss_msg->msg.errno = err;
                __gss_unhash_msg(gss_msg);
                spin_unlock(&gss_auth->lock);
                gss_release_msg(gss_msg);
        } else {
                struct auth_cred acred = { .uid = uid };
                spin_unlock(&gss_auth->lock);
                cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW);
                if (IS_ERR(cred)) {
                        err = PTR_ERR(cred);
                        goto err_put_ctx;
                }
                gss_cred_set_ctx(cred, gss_get_ctx(ctx));
        }
        gss_put_ctx(ctx);
        kfree(buf);
        dprintk("RPC: gss_pipe_downcall returning length %Zu\n", mlen);
        return mlen;
err_put_ctx:
        gss_put_ctx(ctx);
err:
        kfree(buf);
out:
        dprintk("RPC: gss_pipe_downcall returning %d\n", err);
        return err;
}
static void
gss_pipe_release(struct inode *inode)
{
        struct rpc_inode *rpci = RPC_I(inode);
        struct rpc_clnt *clnt;
        struct rpc_auth *auth;
        struct gss_auth *gss_auth;

        clnt = rpci->private;
        auth = clnt->cl_auth;
        gss_auth = container_of(auth, struct gss_auth, rpc_auth);
        spin_lock(&gss_auth->lock);
        while (!list_empty(&gss_auth->upcalls)) {
                struct gss_upcall_msg *gss_msg;

                gss_msg = list_entry(gss_auth->upcalls.next,
                                struct gss_upcall_msg, list);
                gss_msg->msg.errno = -EPIPE;
                atomic_inc(&gss_msg->count);
                __gss_unhash_msg(gss_msg);
                spin_unlock(&gss_auth->lock);
                gss_release_msg(gss_msg);
                spin_lock(&gss_auth->lock);
        }
        spin_unlock(&gss_auth->lock);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
        struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
        static unsigned long ratelimit;

        if (msg->errno < 0) {
                dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n",
                                gss_msg);
                atomic_inc(&gss_msg->count);
                gss_unhash_msg(gss_msg);
                if (msg->errno == -ETIMEDOUT) {
                        unsigned long now = jiffies;
                        if (time_after(now, ratelimit)) {
                                printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
                                                "Please check user daemon is running!\n");
                                ratelimit = now + 15*HZ;
                        }
                }
                gss_release_msg(gss_msg);
        }
}
/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
        struct gss_auth *gss_auth;
        struct rpc_auth * auth;
        int err = -ENOMEM; /* XXX? */

        dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

        if (!try_module_get(THIS_MODULE))
                return ERR_PTR(err);
        if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
                goto out_dec;
        gss_auth->client = clnt;
        err = -EINVAL;
        gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
        if (!gss_auth->mech) {
                printk(KERN_WARNING "%s: Pseudoflavor %d not found!",
                                __FUNCTION__, flavor);
                goto err_free;
        }
        gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
        if (gss_auth->service == 0)
                goto err_put_mech;
        INIT_LIST_HEAD(&gss_auth->upcalls);
        spin_lock_init(&gss_auth->lock);
        auth = &gss_auth->rpc_auth;
        auth->au_cslack = GSS_CRED_SLACK >> 2;
        auth->au_rslack = GSS_VERF_SLACK >> 2;
        auth->au_ops = &authgss_ops;
        auth->au_flavor = flavor;
        atomic_set(&auth->au_count, 1);

        err = rpcauth_init_credcache(auth, GSS_CRED_EXPIRE);
        if (err)
                goto err_put_mech;

        gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
                        clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
        if (IS_ERR(gss_auth->dentry)) {
                err = PTR_ERR(gss_auth->dentry);
                goto err_put_mech;
        }

        return auth;
err_put_mech:
        gss_mech_put(gss_auth->mech);
err_free:
        kfree(gss_auth);
out_dec:
        module_put(THIS_MODULE);
        return ERR_PTR(err);
}
static void
gss_destroy(struct rpc_auth *auth)
{
        struct gss_auth *gss_auth;

        dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
                        auth, auth->au_flavor);

        gss_auth = container_of(auth, struct gss_auth, rpc_auth);
        rpc_unlink(gss_auth->dentry);
        gss_auth->dentry = NULL;
        gss_mech_put(gss_auth->mech);

        rpcauth_free_credcache(auth);
        kfree(gss_auth);
        module_put(THIS_MODULE);
}

/* gss_destroy_cred (and gss_destroy_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_destroy_ctx(struct gss_cl_ctx *ctx)
{
        dprintk("RPC: gss_destroy_ctx\n");

        if (ctx->gc_gss_ctx)
                gss_delete_sec_context(&ctx->gc_gss_ctx);

        kfree(ctx->gc_wire_ctx.data);
        kfree(ctx);
}

static void
gss_destroy_cred(struct rpc_cred *rc)
{
        struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base);

        dprintk("RPC: gss_destroy_cred \n");

        if (cred->gc_ctx)
                gss_put_ctx(cred->gc_ctx);
        kfree(cred);
}
/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
        return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
        struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
        struct gss_cred *cred = NULL;
        int err = -ENOMEM;

        dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
                        acred->uid, auth->au_flavor);

        if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
                goto out_err;

        atomic_set(&cred->gc_count, 1);
        cred->gc_uid = acred->uid;
        /*
         * Note: in order to force a call to call_refresh(), we deliberately
         * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
         */
        cred->gc_flags = 0;
        cred->gc_base.cr_ops = &gss_credops;
        cred->gc_base.cr_flags = RPCAUTH_CRED_NEW;
        cred->gc_service = gss_auth->service;
        return &cred->gc_base;

out_err:
        dprintk("RPC: gss_create_cred failed with error %d\n", err);
        return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
        struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
        struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
        int err;

        do {
                err = gss_create_upcall(gss_auth, gss_cred);
        } while (err == -EAGAIN);
        return err;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
        struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

        /*
         * If the searchflags have set RPCAUTH_LOOKUP_NEW, then
         * we don't really care if the credential has expired or not,
         * since the caller should be prepared to reinitialise it.
         */
        if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW))
                goto out;
        /* Don't match with creds that have expired. */
        if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
                return 0;
out:
        return (rc->cr_uid == acred->uid);
}
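/*
 * The credential built by gss_marshal() below follows RFC 2203: the
 * RPC_AUTH_GSS flavor and length, then the GSS version, procedure,
 * sequence number, service and the opaque context handle.  The verifier is
 * a MIC computed over the RPC header up to and including that credential.
 */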
/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
                                                 gc_base);
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
        __be32 *cred_len;
        struct rpc_rqst *req = task->tk_rqstp;
        u32 maj_stat = 0;
        struct xdr_netobj mic;
        struct kvec iov;
        struct xdr_buf verf_buf;

        dprintk("RPC: %5u gss_marshal\n", task->tk_pid);

        *p++ = htonl(RPC_AUTH_GSS);
        cred_len = p++;

        spin_lock(&ctx->gc_seq_lock);
        req->rq_seqno = ctx->gc_seq++;
        spin_unlock(&ctx->gc_seq_lock);

        *p++ = htonl((u32) RPC_GSS_VERSION);
        *p++ = htonl((u32) ctx->gc_proc);
        *p++ = htonl((u32) req->rq_seqno);
        *p++ = htonl((u32) gss_cred->gc_service);
        p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
        *cred_len = htonl((p - (cred_len + 1)) << 2);

        /* We compute the checksum for the verifier over the xdr-encoded bytes
         * starting with the xid and ending at the end of the credential: */
        iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
                                        req->rq_snd_buf.head[0].iov_base);
        iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
        xdr_buf_from_iov(&iov, &verf_buf);

        /* set verifier flavor*/
        *p++ = htonl(RPC_AUTH_GSS);

        mic.data = (u8 *)(p + 1);
        maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        } else if (maj_stat != 0) {
                printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
                goto out_put_ctx;
        }
        p = xdr_encode_opaque(p, NULL, mic.len);
        gss_put_ctx(ctx);
        return p;
out_put_ctx:
        gss_put_ctx(ctx);
        return NULL;
}
/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{

        if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred))
                return gss_refresh_upcall(task);
        return 0;
}

static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
        __be32 seq;
        struct kvec iov;
        struct xdr_buf verf_buf;
        struct xdr_netobj mic;
        u32 flav,len;
        u32 maj_stat;

        dprintk("RPC: %5u gss_validate\n", task->tk_pid);

        flav = ntohl(*p++);
        if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
                goto out_bad;
        if (flav != RPC_AUTH_GSS)
                goto out_bad;
        seq = htonl(task->tk_rqstp->rq_seqno);
        iov.iov_base = &seq;
        iov.iov_len = sizeof(seq);
        xdr_buf_from_iov(&iov, &verf_buf);
        mic.data = (u8 *)p;
        mic.len = len;

        maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        if (maj_stat)
                goto out_bad;
        /* We leave it to unwrap to calculate au_rslack. For now we just
         * calculate the length of the verifier: */
        task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2;
        gss_put_ctx(ctx);
        dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n",
                        task->tk_pid);
        return p + XDR_QUADLEN(len);
out_bad:
        gss_put_ctx(ctx);
        dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid);
        return NULL;
}
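/*
 * For the integrity service the request body becomes a length-prefixed
 * sequence number plus the original arguments, followed by a MIC over that
 * region; for privacy the sequence number and arguments are passed through
 * gss_wrap() and sent as a single opaque blob (RFC 2203 rpc_gss_integ_data
 * and rpc_gss_priv_data).  The helpers below build and unwrap those bodies.
 */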
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
                kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
        struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
        struct xdr_buf integ_buf;
        __be32 *integ_len = NULL;
        struct xdr_netobj mic;
        u32 offset;
        __be32 *q;
        struct kvec *iov;
        u32 maj_stat = 0;
        int status = -EIO;

        integ_len = p++;
        offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
        *p++ = htonl(rqstp->rq_seqno);

        status = encode(rqstp, p, obj);
        if (status)
                return status;

        if (xdr_buf_subsegment(snd_buf, &integ_buf,
                                offset, snd_buf->len - offset))
                return status;
        *integ_len = htonl(integ_buf.len);

        /* guess whether we're in the head or the tail: */
        if (snd_buf->page_len || snd_buf->tail[0].iov_len)
                iov = snd_buf->tail;
        else
                iov = snd_buf->head;
        p = iov->iov_base + iov->iov_len;
        mic.data = (u8 *)(p + 1);

        maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
        status = -EIO; /* XXX? */
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        else if (maj_stat)
                return status;
        q = xdr_encode_opaque(p, NULL, mic.len);

        offset = (u8 *)q - (u8 *)p;
        iov->iov_len += offset;
        snd_buf->len += offset;
        return 0;
}
static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
        int i;

        for (i=0; i < rqstp->rq_enc_pages_num; i++)
                __free_page(rqstp->rq_enc_pages[i]);
        kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
        struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
        int first, last, i;

        if (snd_buf->page_len == 0) {
                rqstp->rq_enc_pages_num = 0;
                return 0;
        }

        first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
        last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
        rqstp->rq_enc_pages_num = last - first + 1 + 1;
        rqstp->rq_enc_pages
                = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
                                GFP_NOFS);
        if (!rqstp->rq_enc_pages)
                goto out;
        for (i=0; i < rqstp->rq_enc_pages_num; i++) {
                rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
                if (rqstp->rq_enc_pages[i] == NULL)
                        goto out_free;
        }
        rqstp->rq_release_snd_buf = priv_release_snd_buf;
        return 0;
out_free:
        for (i--; i >= 0; i--) {
                __free_page(rqstp->rq_enc_pages[i]);
        }
out:
        return -EAGAIN;
}
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
                kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
        struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
        u32 offset;
        u32 maj_stat;
        int status;
        __be32 *opaque_len;
        struct page **inpages;
        int first;
        int pad;
        struct kvec *iov;
        char *tmp;

        opaque_len = p++;
        offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
        *p++ = htonl(rqstp->rq_seqno);

        status = encode(rqstp, p, obj);
        if (status)
                return status;

        status = alloc_enc_pages(rqstp);
        if (status)
                return status;
        first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
        inpages = snd_buf->pages + first;
        snd_buf->pages = rqstp->rq_enc_pages;
        snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
        /* Give the tail its own page, in case we need extra space in the
         * head when wrapping: */
        if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
                tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
                memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
                snd_buf->tail[0].iov_base = tmp;
        }
        maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
        /* RPC_SLACK_SPACE should prevent this ever happening: */
        BUG_ON(snd_buf->len > snd_buf->buflen);
        status = -EIO;
        /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
         * done anyway, so it's safe to put the request on the wire: */
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        else if (maj_stat)
                return status;

        *opaque_len = htonl(snd_buf->len - offset);
        /* guess whether we're in the head or the tail: */
        if (snd_buf->page_len || snd_buf->tail[0].iov_len)
                iov = snd_buf->tail;
        else
                iov = snd_buf->head;
        p = iov->iov_base + iov->iov_len;
        pad = 3 - ((snd_buf->len - offset - 1) & 3);
        memset(p, 0, pad);
        iov->iov_len += pad;
        snd_buf->len += pad;

        return 0;
}
static int
gss_wrap_req(struct rpc_task *task,
             kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
                        gc_base);
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
        int status = -EIO;

        dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid);
        if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
                /* The spec seems a little ambiguous here, but I think that not
                 * wrapping context destruction requests makes the most sense.
                 */
                status = encode(rqstp, p, obj);
                goto out;
        }
        switch (gss_cred->gc_service) {
                case RPC_GSS_SVC_NONE:
                        status = encode(rqstp, p, obj);
                        break;
                case RPC_GSS_SVC_INTEGRITY:
                        status = gss_wrap_req_integ(cred, ctx, encode,
                                        rqstp, p, obj);
                        break;
                case RPC_GSS_SVC_PRIVACY:
                        status = gss_wrap_req_priv(cred, ctx, encode,
                                        rqstp, p, obj);
                        break;
        }
out:
        gss_put_ctx(ctx);
        dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status);
        return status;
}
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
                struct rpc_rqst *rqstp, __be32 **p)
{
        struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
        struct xdr_buf integ_buf;
        struct xdr_netobj mic;
        u32 data_offset, mic_offset;
        u32 integ_len;
        u32 maj_stat;
        int status = -EIO;

        integ_len = ntohl(*(*p)++);
        if (integ_len & 3)
                return status;
        data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
        mic_offset = integ_len + data_offset;
        if (mic_offset > rcv_buf->len)
                return status;
        if (ntohl(*(*p)++) != rqstp->rq_seqno)
                return status;

        if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
                                mic_offset - data_offset))
                return status;

        if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
                return status;

        maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        if (maj_stat != GSS_S_COMPLETE)
                return status;
        return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
                struct rpc_rqst *rqstp, __be32 **p)
{
        struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
        u32 offset;
        u32 opaque_len;
        u32 maj_stat;
        int status = -EIO;

        opaque_len = ntohl(*(*p)++);
        offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
        if (offset + opaque_len > rcv_buf->len)
                return status;
        /* remove padding: */
        rcv_buf->len = offset + opaque_len;

        maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        if (maj_stat != GSS_S_COMPLETE)
                return status;
        if (ntohl(*(*p)++) != rqstp->rq_seqno)
                return status;

        return 0;
}
static int
gss_unwrap_resp(struct rpc_task *task,
                kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
                        gc_base);
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
        __be32 *savedp = p;
        struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
        int savedlen = head->iov_len;
        int status = -EIO;

        if (ctx->gc_proc != RPC_GSS_PROC_DATA)
                goto out_decode;
        switch (gss_cred->gc_service) {
                case RPC_GSS_SVC_NONE:
                        break;
                case RPC_GSS_SVC_INTEGRITY:
                        status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
                        if (status)
                                goto out;
                        break;
                case RPC_GSS_SVC_PRIVACY:
                        status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
                        if (status)
                                goto out;
                        break;
        }
        /* take into account extra slack for integrity and privacy cases: */
        task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp)
                        + (savedlen - head->iov_len);
out_decode:
        status = decode(rqstp, p, obj);
out:
        gss_put_ctx(ctx);
        dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
                        status);
        return status;
}
static struct rpc_authops authgss_ops = {
        .owner          = THIS_MODULE,
        .au_flavor      = RPC_AUTH_GSS,
#ifdef RPC_DEBUG
        .au_name        = "RPCSEC_GSS",
#endif
        .create         = gss_create,
        .destroy        = gss_destroy,
        .lookup_cred    = gss_lookup_cred,
        .crcreate       = gss_create_cred
};

static struct rpc_credops gss_credops = {
        .cr_name        = "AUTH_GSS",
        .crdestroy      = gss_destroy_cred,
        .cr_init        = gss_cred_init,
        .crmatch        = gss_match,
        .crmarshal      = gss_marshal,
        .crrefresh      = gss_refresh,
        .crvalidate     = gss_validate,
        .crwrap_req     = gss_wrap_req,
        .crunwrap_resp  = gss_unwrap_resp,
};

static struct rpc_pipe_ops gss_upcall_ops = {
        .upcall         = gss_pipe_upcall,
        .downcall       = gss_pipe_downcall,
        .destroy_msg    = gss_pipe_destroy_msg,
        .release_pipe   = gss_pipe_release,
};
/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
        int err = 0;

        err = rpcauth_register(&authgss_ops);
        if (err)
                goto out;
        err = gss_svc_init();
        if (err)
                goto out_unregister;
        return 0;
out_unregister:
        rpcauth_unregister(&authgss_ops);
out:
        return err;
}

static void __exit exit_rpcsec_gss(void)
{
        gss_svc_shutdown();
        rpcauth_unregister(&authgss_ops);
}

MODULE_LICENSE("GPL");
module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)