/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id$
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define NFS_NGROUPS	16

#define GSS_CRED_SLACK		1024		/* XXX: unused */
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

/* XXX this define must match the gssd define
 * as it is passed to gssd to signal the use of
 * machine creds should be part of the shared rpc interface */

#define CA_RUN_AS_MACHINE	0x00000200

/* dump the buffer in `emacs-hexl' style */
#define isprint(c)	((c > 0x1f) && (c < 0x7f))
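/*
 * Per-flavor GSS authenticator: the mechanism and service selected at
 * creation time, the owning RPC client, and the rpc_pipefs pipe used to
 * exchange upcalls with the user-space gssd daemon.
 */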
struct gss_auth {
	struct kref kref;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct dentry *dentry;
};

static void gss_free_ctx(struct gss_cl_ctx *);
static struct rpc_pipe_ops gss_upcall_ops;
static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the inode->i_lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *old;

	old = gss_cred->gc_ctx;
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
	if (old)
		gss_put_ctx(old);
}

static int
gss_cred_is_uptodate_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int res = 0;

	rcu_read_lock();
	if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx)
		res = 1;
	rcu_read_unlock();
	return res;
}
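/*
 * Helpers for parsing the buffer received from gssd: fetch a fixed-size
 * value or a counted netobj and return the advanced pointer, or an
 * ERR_PTR() if the read would run past 'end'.
 */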
static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_KERNEL);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count, 1);
	}
	return ctx;
}
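/*
 * Parse a completed context downcall from gssd: credential lifetime,
 * sequence window, the opaque on-the-wire context handle, and the
 * serialized security context, which is imported through the mechanism's
 * gss_import_sec_context().
 */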
#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	u32 window_size;
	int ret;

	/* First unsigned int gives the lifetime (in seconds) of the cred */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
	/* Sequence number window. Determines the maximum number of simultaneous requests */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/* in which case, p points to an error code which we ignore */
		p = ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	return q;
err:
	dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p));
	return p;
}
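/*
 * One pending upcall to gssd.  Reference counted; it sits on the pipe's
 * in_downcall list until gssd answers (or the pipe is closed), with
 * waiters parked on rpc_waitqueue (async tasks) or waitqueue (sync
 * callers).
 */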
struct gss_upcall_msg {
	atomic_t count;
	uid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
};

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &rpci->in_downcall, list) {
		if (pos->uid != uid)
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: gss_find_upcall found msg %p\n", pos);
		return pos;
	}
	dprintk("RPC: gss_find_upcall found nothing\n");
	return NULL;
}
/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
{
	struct inode *inode = gss_auth->dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct gss_upcall_msg *old;

	spin_lock(&inode->i_lock);
	old = __gss_find_upcall(rpci, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &rpci->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&inode->i_lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct gss_auth *gss_auth = gss_msg->auth;
	struct inode *inode = gss_auth->dentry->d_inode;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&inode->i_lock);
}
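/*
 * Run when an async RPC task is woken after its upcall completes: install
 * the new context (or record the error) under inode->i_lock, then drop
 * the reference taken in gss_refresh_upcall().
 */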
static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct inode *inode = gss_msg->auth->dentry->d_inode;

	spin_lock(&inode->i_lock);
	if (gss_msg->ctx)
		gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx));
	else
		task->tk_status = gss_msg->msg.errno;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
}

static inline struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
{
	struct gss_upcall_msg *gss_msg;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
	if (gss_msg != NULL) {
		INIT_LIST_HEAD(&gss_msg->list);
		rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
		init_waitqueue_head(&gss_msg->waitqueue);
		atomic_set(&gss_msg->count, 1);
		gss_msg->msg.data = &gss_msg->uid;
		gss_msg->msg.len = sizeof(gss_msg->uid);
		gss_msg->uid = uid;
		gss_msg->auth = gss_auth;
	}
	return gss_msg;
}

static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_upcall_msg *gss_new, *gss_msg;

	gss_new = gss_alloc_msg(gss_auth, cred->cr_uid);
	if (gss_new == NULL)
		return ERR_PTR(-ENOMEM);
	gss_msg = gss_add_msg(gss_auth, gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}
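/*
 * Upcall path for asynchronous RPC tasks: queue (or join) an upcall to
 * gssd and put the task to sleep until gss_upcall_callback() runs.
 */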
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct inode *inode = gss_auth->dentry->d_inode;
	int err = 0;

	dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
			cred->cr_uid);
	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	spin_lock(&inode->i_lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL);
	} else
		err = gss_msg->msg.errno;
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
			task->tk_pid, cred->cr_uid, err);
	return err;
}
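/*
 * Synchronous upcall, used when initialising a credential: wait
 * interruptibly until gssd supplies a context or reports an error.
 */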
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct inode *inode = gss_auth->dentry->d_inode;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err = 0;

	dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&inode->i_lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&inode->i_lock);
		if (signalled()) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx));
	else
		err = gss_msg->msg.errno;
	spin_unlock(&inode->i_lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: gss_create_upcall for uid %u result %d\n",
			cred->cr_uid, err);
	return err;
}
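/*
 * Read side of the rpc_pipefs pipe: copy the upcall message (the uid) out
 * to gssd, tracking how much has already been copied.
 */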
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
		char __user *dst, size_t buflen)
{
	char *data = (char *)msg->data + msg->copied;
	ssize_t mlen = msg->len;
	ssize_t left;

	if (mlen > buflen)
		mlen = buflen;
	left = copy_to_user(dst, data, mlen);
	if (left < 0) {
		msg->errno = left;
		return left;
	}
	mlen -= left;
	msg->copied += mlen;
	msg->errno = 0;
	return mlen;
}

#define MSG_BUF_MAXSIZE 1024
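/*
 * Write side of the pipe: gssd sends back the uid followed by the context
 * blob, which is parsed by gss_fill_context() and attached to the
 * matching upcall.
 */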
static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct rpc_clnt *clnt;
	struct gss_upcall_msg *gss_msg;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gss_cl_ctx *ctx;
	uid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_KERNEL);
	if (!buf)
		goto out;

	clnt = RPC_I(inode)->private;
	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &uid, sizeof(uid));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&inode->i_lock);
	gss_msg = __gss_find_upcall(RPC_I(inode), uid);
	if (gss_msg == NULL) {
		spin_unlock(&inode->i_lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&inode->i_lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		gss_msg->msg.errno = (err == -EACCES) ? -EACCES : -EAGAIN;
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&inode->i_lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: gss_pipe_downcall returning %Zd\n", err);
	return err;
}
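/*
 * The pipe is being torn down (for example, gssd closed it): fail every
 * pending upcall with -EPIPE and wake up its waiters.
 */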
static void
gss_pipe_release(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct gss_upcall_msg *gss_msg;

	spin_lock(&inode->i_lock);
	while (!list_empty(&rpci->in_downcall)) {

		gss_msg = list_entry(rpci->in_downcall.next,
				struct gss_upcall_msg, list);
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&inode->i_lock);
		gss_release_msg(gss_msg);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
	static unsigned long ratelimit;

	if (msg->errno < 0) {
		dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n",
				gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT) {
			unsigned long now = jiffies;
			if (time_after(now, ratelimit)) {
				printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
						    "Please check user daemon is running!\n");
				ratelimit = now + 15*HZ;
			}
		}
		gss_release_msg(gss_msg);
	}
}
/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct gss_auth *gss_auth;
	struct rpc_auth *auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	gss_auth->client = clnt;
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n",
				__FUNCTION__, flavor);
		goto err_free;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
			clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->dentry)) {
		err = PTR_ERR(gss_auth->dentry);
		goto err_put_mech;
	}

	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_unlink_pipe;

	return auth;
err_unlink_pipe:
	rpc_unlink(gss_auth->dentry);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_free:
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}
static void
gss_free(struct gss_auth *gss_auth)
{
	rpc_unlink(gss_auth->dentry);
	gss_auth->dentry = NULL;
	gss_mech_put(gss_auth->mech);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
			auth, auth->au_flavor);

	rpcauth_destroy_credcache(auth);

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct rpc_task *task;

	if (gss_cred->gc_ctx == NULL ||
			gss_cred->gc_ctx->gc_proc == RPC_GSS_PROC_DESTROY)
		return 0;

	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: gss_free_ctx\n");

	if (ctx->gc_gss_ctx)
		gss_delete_sec_context(&ctx->gc_gss_ctx);

	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: gss_free_cred %p\n", gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}
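/*
 * Final destruction of a GSS cred: if a live context is still attached,
 * gss_destroying_context() sends RPC_GSS_PROC_DESTROY first; otherwise
 * the cred and its context are freed under RCU.
 */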
static void
gss_destroy_cred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	if (gss_destroying_context(cred))
		return;
	rcu_assign_pointer(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}
static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
			acred->uid, auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: gss_create_cred failed with error %d\n", err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

	/*
	 * If the searchflags have set RPCAUTH_LOOKUP_NEW, then
	 * we don't really care if the credential has expired or not,
	 * since the caller should be prepared to reinitialise it.
	 */
	if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
out:
	return (rc->cr_uid == acred->uid);
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *cred_len;
	struct rpc_rqst *req = task->tk_rqstp;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %5u gss_marshal\n", task->tk_pid);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor */
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}
/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred))
		return gss_refresh_upcall(task);
	return 0;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return -EACCES;
}
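/*
 * Check the verifier in the server's reply: it must be an RPC_AUTH_GSS
 * verifier carrying a MIC, computed over our sequence number, that the
 * mechanism can verify.
 */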
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 seq;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav, len;
	u32 maj_stat;

	dprintk("RPC: %5u gss_validate\n", task->tk_pid);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u gss_validate: gss_verify_mic returned "
				"error 0x%08x\n", task->tk_pid, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n",
			task->tk_pid);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid);
	return NULL;
}
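/*
 * rpc_gss_svc_integrity: XDR-encode the arguments, then append a MIC
 * computed over the sequence number and the encoded data.
 */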
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	__be32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset;
	__be32 *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	lock_kernel();
	status = encode(rqstp, p, obj);
	unlock_kernel();
	if (status)
		return status;

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}
static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i = 0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i = 0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	for (i--; i >= 0; i--) {
		__free_page(rqstp->rq_enc_pages[i]);
	}
out:
	return -EAGAIN;
}
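/*
 * rpc_gss_svc_privacy: XDR-encode the arguments into freshly allocated
 * pages and have the mechanism encrypt them in place via gss_wrap().
 */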
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	__be32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	lock_kernel();
	status = encode(rqstp, p, obj);
	unlock_kernel();
	if (status)
		return status;

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/* Give the tail its own page, in case we need extra space in the
	 * head when wrapping: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* RPC_SLACK_SPACE should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}
static int
gss_wrap_req(struct rpc_task *task,
	     kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		lock_kernel();
		status = encode(rqstp, p, obj);
		unlock_kernel();
		goto out;
	}
	switch (gss_cred->gc_service) {
		case RPC_GSS_SVC_NONE:
			lock_kernel();
			status = encode(rqstp, p, obj);
			unlock_kernel();
			break;
		case RPC_GSS_SVC_INTEGRITY:
			status = gss_wrap_req_integ(cred, ctx, encode,
					rqstp, p, obj);
			break;
		case RPC_GSS_SVC_PRIVACY:
			status = gss_wrap_req_priv(cred, ctx, encode,
					rqstp, p, obj);
			break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status);
	return status;
}
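/*
 * Reverse of gss_wrap_req_integ()/gss_wrap_req_priv(): verify the MIC
 * over the reply body, or decrypt it, before the XDR decode runs.
 */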
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}
static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *savedp = p;
	struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int savedlen = head->iov_len;
	int status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
		case RPC_GSS_SVC_NONE:
			break;
		case RPC_GSS_SVC_INTEGRITY:
			status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
			if (status)
				goto out;
			break;
		case RPC_GSS_SVC_PRIVACY:
			status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
			if (status)
				goto out;
			break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	lock_kernel();
	status = decode(rqstp, p, obj);
	unlock_kernel();
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
			status);
	return status;
}
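/* Operations vectors hooking RPCSEC_GSS into the generic RPC auth layer. */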
static const struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
#ifdef RPC_DEBUG
	.au_name	= "RPCSEC_GSS",
#endif
	.create		= gss_create,
	.destroy	= gss_destroy,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred
};

static const struct rpc_credops gss_credops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.cr_init	= gss_cred_init,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static const struct rpc_credops gss_nullops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh_null,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static struct rpc_pipe_ops gss_upcall_ops = {
	.upcall		= gss_pipe_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.release_pipe	= gss_pipe_release,
};
/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	return 0;
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
}

MODULE_LICENSE("GPL");
module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)