/* net/sunrpc/svcauth_unix.c */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <net/sock.h>

#define RPCDBG_FACILITY	RPCDBG_AUTH

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2), i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */

struct unix_domain {
	struct auth_domain	h;
	int			addr_changes;
	/* other stuff later */
};

extern struct auth_ops svcauth_unix;
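/*
 * Find the auth_domain registered under @name, creating a fresh
 * AUTH_UNIX domain if none exists yet.  Returns NULL if the name is
 * already bound to a different flavour or if allocation fails.
 */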
struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_lookup(name, NULL);
	while (1) {
		if (rv) {
			if (new && rv != &new->h)
				auth_domain_put(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		new->addr_changes = 0;
		rv = auth_domain_lookup(name, &new->h);
	}
}

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}

/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define	IP_HASHBITS	8
#define	IP_HASHMAX	(1<<IP_HASHBITS)
#define	IP_HASHMASK	(IP_HASHMAX-1)

struct ip_map {
	struct cache_head	h;
	char			m_class[8]; /* e.g. "nfsd" */
	struct in_addr		m_addr;
	struct unix_domain	*m_client;
	int			m_add_change;
};
static struct cache_head	*ip_table[IP_HASHMAX];

static void ip_map_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct ip_map *im = container_of(item, struct ip_map, h);

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		auth_domain_put(&im->m_client->h);
	kfree(im);
}

#if IP_HASHBITS == 8
/* hash_long on a 64 bit machine is currently REALLY BAD for
 * IP addresses in reverse-endian (i.e. on a little-endian machine).
 * So use a trivial but reliable hash instead
 */
static inline int hash_ip(__be32 ip)
{
	int hash = (__force u32)ip ^ ((__force u32)ip>>16);
	return (hash ^ (hash>>8)) & 0xff;
}
#endif
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *orig = container_of(corig, struct ip_map, h);
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	return strcmp(orig->m_class, new->m_class) == 0
		&& orig->m_addr.s_addr == new->m_addr.s_addr;
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	strcpy(new->m_class, item->m_class);
	new->m_addr.s_addr = item->m_addr.s_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	kref_get(&item->m_client->h.ref);
	new->m_client = item->m_client;
	new->m_add_change = item->m_add_change;
}
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i)
		return &i->h;
	else
		return NULL;
}
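/*
 * Upcall request format written to the cache channel:
 * "<class> <dotted-quad address>\n", e.g. "nfsd 192.168.1.1".
 */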
static void ip_map_request(struct cache_detail *cd,
			   struct cache_head *h,
			   char **bpp, int *blen)
{
	char text_addr[20];
	struct ip_map *im = container_of(h, struct ip_map, h);
	__be32 addr = im->m_addr.s_addr;

	snprintf(text_addr, 20, "%u.%u.%u.%u",
		 ntohl(addr) >> 24 & 0xff,
		 ntohl(addr) >> 16 & 0xff,
		 ntohl(addr) >>  8 & 0xff,
		 ntohl(addr) >>  0 & 0xff);

	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	(*bpp)[-1] = '\n';
}

static struct ip_map *ip_map_lookup(char *class, struct in_addr addr);
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);

static int ip_map_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* class ipaddress [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	int b1, b2, b3, b4;
	char c;
	char class[8];
	struct in_addr addr;
	int err;

	struct ip_map *ipmp;
	struct auth_domain *dom;
	time_t expiry;

	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
		return -EINVAL;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	addr.s_addr =
		htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);

	ipmp = ip_map_lookup(class, addr);
	if (ipmp) {
		err = ip_map_update(ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}
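/*
 * cache_show callback: emit one "class IP domain" line per entry for
 * the cache's content listing.
 */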
static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	addr = im->m_addr;

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	seq_printf(m, "%s %d.%d.%d.%d %s\n",
		   im->m_class,
		   ntohl(addr.s_addr) >> 24 & 0xff,
		   ntohl(addr.s_addr) >> 16 & 0xff,
		   ntohl(addr.s_addr) >>  8 & 0xff,
		   ntohl(addr.s_addr) >>  0 & 0xff,
		   dom);
	return 0;
}


struct cache_detail ip_map_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= IP_HASHMAX,
	.hash_table	= ip_table,
	.name		= "auth.unix.ip",
	.cache_put	= ip_map_put,
	.cache_request	= ip_map_request,
	.cache_parse	= ip_map_parse,
	.cache_show	= ip_map_show,
	.match		= ip_map_match,
	.init		= ip_map_init,
	.update		= update,
	.alloc		= ip_map_alloc,
};
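/*
 * Find the entry for (class, address) in the auth.unix.ip cache,
 * creating a fresh, not-yet-valid entry if none exists.
 */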
static struct ip_map *ip_map_lookup(char *class, struct in_addr addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ip.m_addr = addr;
	ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
				 hash_str(class, IP_HASHBITS) ^
				 hash_ip(addr.s_addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}
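/*
 * Install @udom as the domain for an existing entry, or mark the entry
 * negative when @udom is NULL.
 */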
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	else {
		ip.m_add_change = udom->addr_changes;
		/* if this is from the legacy set_client system call,
		 * we need m_add_change to be one higher
		 */
		if (expiry == NEVER)
			ip.m_add_change++;
	}
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(&ip_map_cache,
				 &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip(ipm->m_addr.s_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, &ip_map_cache);
	return 0;
}
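/*
 * Add a permanent (NEVER-expiring) mapping from @addr to @dom in the
 * "nfsd" class, on behalf of the legacy set_client interface.
 */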
int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
{
	struct unix_domain *udom;
	struct ip_map *ipmp;

	if (dom->flavour != &svcauth_unix)
		return -EINVAL;
	udom = container_of(dom, struct unix_domain, h);
	ipmp = ip_map_lookup("nfsd", addr);

	if (ipmp)
		return ip_map_update(ipmp, udom, NEVER);
	else
		return -ENOMEM;
}
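/*
 * Invalidate the addresses previously added for @dom: bumping
 * addr_changes makes auth_unix_lookup() treat older entries as
 * negative.
 */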
int auth_unix_forget_old(struct auth_domain *dom)
{
	struct unix_domain *udom;

	if (dom->flavour != &svcauth_unix)
		return -EINVAL;
	udom = container_of(dom, struct unix_domain, h);
	udom->addr_changes++;
	return 0;
}
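/*
 * Return (with a reference held) the auth_domain currently mapped to
 * @addr in the "nfsd" class, or NULL if no valid mapping exists.
 */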
struct auth_domain *auth_unix_lookup(struct in_addr addr)
{
	struct ip_map *ipm;
	struct auth_domain *rv;

	ipm = ip_map_lookup("nfsd", addr);

	if (!ipm)
		return NULL;
	if (cache_check(&ip_map_cache, &ipm->h, NULL))
		return NULL;

	if ((ipm->m_client->addr_changes - ipm->m_add_change) > 0) {
		if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
			auth_domain_put(&ipm->m_client->h);
		rv = NULL;
	} else {
		rv = &ipm->m_client->h;
		kref_get(&rv->ref);
	}
	cache_put(&ipm->h, &ip_map_cache);
	return rv;
}

void svcauth_unix_purge(void)
{
	cache_purge(&ip_map_cache);
}
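/*
 * Each svc_sock may cache a reference to the ip_map for its peer in
 * sk_info_authunix, so a TCP connection pays for only one cache
 * lookup.  The cached entry is dropped once it becomes invalid.
 */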
static inline struct ip_map *
ip_map_cached_get(struct svc_rqst *rqstp)
{
	struct ip_map *ipm;
	struct svc_sock *svsk = rqstp->rq_sock;
	spin_lock(&svsk->sk_lock);
	ipm = svsk->sk_info_authunix;
	if (ipm != NULL) {
		if (!cache_valid(&ipm->h)) {
			/*
			 * The entry has been invalidated since it was
			 * remembered, e.g. by a second mount from the
			 * same IP address.
			 */
			svsk->sk_info_authunix = NULL;
			spin_unlock(&svsk->sk_lock);
			cache_put(&ipm->h, &ip_map_cache);
			return NULL;
		}
		cache_get(&ipm->h);
	}
	spin_unlock(&svsk->sk_lock);
	return ipm;
}

static inline void
ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
{
	struct svc_sock *svsk = rqstp->rq_sock;

	spin_lock(&svsk->sk_lock);
	if (svsk->sk_sock->type == SOCK_STREAM &&
	    svsk->sk_info_authunix == NULL) {
		/* newly cached, keep the reference */
		svsk->sk_info_authunix = ipm;
		ipm = NULL;
	}
	spin_unlock(&svsk->sk_lock);
	if (ipm)
		cache_put(&ipm->h, &ip_map_cache);
}
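/*
 * Drop the reference held on an ip_map that was cached as a socket's
 * sk_info_authunix.
 */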
void
svcauth_unix_info_release(void *info)
{
	struct ip_map *ipm = info;
	cache_put(&ipm->h, &ip_map_cache);
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of 16
 */
#define	GID_HASHBITS	8
#define	GID_HASHMAX	(1<<GID_HASHBITS)
#define	GID_HASHMASK	(GID_HASHMAX - 1)

struct unix_gid {
	struct cache_head	h;
	uid_t			uid;
	struct group_info	*gi;
};
static struct cache_head	*gid_table[GID_HASHMAX];

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree(ug);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return orig->uid == new->uid;
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}
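/*
 * Upcall request format written to the cache channel: "<uid>\n".
 */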
static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", ug->uid);
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}

static struct unix_gid *unix_gid_lookup(uid_t uid);
extern struct cache_detail unix_gid_cache;

static int unix_gid_parse(struct cache_detail *cd,
			  char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int uid;
	int gids;
	int rv;
	int i;
	int err;
	time_t expiry;
	struct unix_gid ug, *ugp;

	if (mlen <= 0 || mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &uid);
	if (rv)
		return -EINVAL;
	ug.uid = uid;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0; i < gids; i++) {
		int gid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		GROUP_AT(ug.gi, i) = gid;
	}

	ugp = unix_gid_lookup(uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(&unix_gid_cache,
					 &ug.h, &ugp->h,
					 hash_long(uid, GID_HASHBITS));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, &unix_gid_cache);
		}
	} else
		err = -ENOMEM;
 out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}

static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%d %d:", ug->uid, glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", GROUP_AT(ug->gi, i));
	seq_printf(m, "\n");
	return 0;
}

struct cache_detail unix_gid_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= GID_HASHMAX,
	.hash_table	= gid_table,
	.name		= "auth.unix.gid",
	.cache_put	= unix_gid_put,
	.cache_request	= unix_gid_request,
	.cache_parse	= unix_gid_parse,
	.cache_show	= unix_gid_show,
	.match		= unix_gid_match,
	.init		= unix_gid_init,
	.update		= unix_gid_update,
	.alloc		= unix_gid_alloc,
};
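/*
 * Find the entry for @uid in the auth.unix.gid cache, creating a
 * fresh, not-yet-valid entry if none exists.
 */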
static struct unix_gid *unix_gid_lookup(uid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h,
				 hash_long(uid, GID_HASHBITS));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}
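/*
 * Resolve @uid to its supplementary group list via the auth.unix.gid
 * cache.  On success *gip is either the cached group_info (with a
 * reference taken) or NULL when no entry exists; -EAGAIN means the
 * entry is not usable yet.
 */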
static int unix_gid_find(uid_t uid, struct group_info **gip,
			 struct svc_rqst *rqstp)
{
	struct unix_gid *ug = unix_gid_lookup(uid);
	if (!ug)
		return -EAGAIN;
	switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
	case -ENOENT:
		*gip = NULL;
		return 0;
	case 0:
		*gip = ug->gi;
		get_group_info(*gip);
		return 0;
	default:
		return -EAGAIN;
	}
}
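/*
 * Map the client's IP address to an auth_domain and attach it to the
 * request as rq_client.  Shared by the null and unix flavours as their
 * set_client method.  Returns SVC_OK, SVC_DROP while waiting on the
 * cache upcall, or SVC_DENIED if the address is not known.
 */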
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin = svc_addr_in(rqstp);
	struct ip_map *ipm;

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		return SVC_OK;

	ipm = ip_map_cached_get(rqstp);
	if (ipm == NULL)
		ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
				    sin->sin_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
	default:
		BUG();
	case -EAGAIN:
	case -ETIMEDOUT:
		return SVC_DROP;
	case -ENOENT:
		return SVC_DENIED;
	case 0:
		rqstp->rq_client = &ipm->m_client->h;
		kref_get(&rqstp->rq_client->ref);
		ip_map_cached_put(rqstp, ipm);
		break;
	}
	return SVC_OK;
}

EXPORT_SYMBOL(svcauth_unix_set_client);
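/*
 * AUTH_NULL: accept the empty credential and flag the request for
 * mapping to the anonymous (nobody) uid/gid.
 */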
static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;

	cred->cr_group_info = NULL;
	rqstp->rq_client = NULL;

	if (argv->iov_len < 3*4)
		return SVC_GARBAGE;

	if (svc_getu32(argv) != 0) {
		dprintk("svc: bad null cred\n");
		*authp = rpc_autherr_badcred;
		return SVC_DENIED;
	}
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		dprintk("svc: bad null verf\n");
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = (uid_t) -1;
	cred->cr_gid = (gid_t) -1;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_DROP; /* kmalloc failure - client must retry */

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
	.name		= "null",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,
	.accept		= svcauth_null_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};
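/*
 * AUTH_UNIX: parse uid, gid and the supplementary gids from the
 * credential, preferring the auth.unix.gid cache over the (at most 16)
 * gids carried on the wire when the cache has an entry for the uid.
 */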
static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;
	u32		slen, i;
	int		len   = argv->iov_len;

	cred->cr_group_info = NULL;
	rqstp->rq_client = NULL;

	if ((len -= 3*4) < 0)
		return SVC_GARBAGE;

	svc_getu32(argv);			/* length */
	svc_getu32(argv);			/* time stamp */
	slen = XDR_QUADLEN(svc_getnl(argv));	/* machname length */
	if (slen > 64 || (len -= (slen + 3)*4) < 0)
		goto badcred;
	argv->iov_base = (void*)((__be32*)argv->iov_base + slen);	/* skip machname */
	argv->iov_len -= slen*4;

	cred->cr_uid = svc_getnl(argv);		/* uid */
	cred->cr_gid = svc_getnl(argv);		/* gid */
	slen = svc_getnl(argv);			/* gids length */
	if (slen > 16 || (len -= (slen + 2)*4) < 0)
		goto badcred;
	if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp)
	    == -EAGAIN)
		return SVC_DROP;
	if (cred->cr_group_info == NULL) {
		cred->cr_group_info = groups_alloc(slen);
		if (cred->cr_group_info == NULL)
			return SVC_DROP;
		for (i = 0; i < slen; i++)
			GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
	} else {
		for (i = 0; i < slen; i++)
			svc_getnl(argv);
	}
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	*authp = rpc_autherr_badcred;
	return SVC_DENIED;
}

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}


struct auth_ops svcauth_unix = {
	.name		= "unix",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_UNIX,
	.accept		= svcauth_unix_accept,
	.release	= svcauth_unix_release,
	.domain_release	= svcauth_unix_domain_release,
	.set_client	= svcauth_unix_set_client,
};