/* net/sunrpc/svcauth_unix.c */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#define RPCDBG_FACILITY RPCDBG_AUTH

#include <linux/sunrpc/clnt.h>
/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2). i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */

struct unix_domain {
        struct auth_domain      h;
        int                     addr_changes;
        /* other stuff later */
};

extern struct auth_ops svcauth_unix;
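/*
 * Look up the auth_domain for @name, creating a new svcauth_unix domain
 * if none exists yet.  The loop retries the lookup after allocating, so
 * a racing creator of the same name wins cleanly and the spare
 * allocation is dropped.  Returns NULL if the name is already bound to
 * a different flavour or on allocation failure.
 */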
struct auth_domain *unix_domain_find(char *name)
{
        struct auth_domain *rv;
        struct unix_domain *new = NULL;

        rv = auth_domain_lookup(name, NULL);
        while(1) {
                if (rv) {
                        if (new && rv != &new->h)
                                auth_domain_put(&new->h);

                        if (rv->flavour != &svcauth_unix) {
                                auth_domain_put(rv);
                                return NULL;
                        }
                        return rv;
                }

                new = kmalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL)
                        return NULL;
                kref_init(&new->h.ref);
                new->h.name = kstrdup(name, GFP_KERNEL);
                if (new->h.name == NULL) {
                        kfree(new);
                        return NULL;
                }
                new->h.flavour = &svcauth_unix;
                new->addr_changes = 0;
                rv = auth_domain_lookup(name, &new->h);
        }
}
EXPORT_SYMBOL_GPL(unix_domain_find);
static void svcauth_unix_domain_release(struct auth_domain *dom)
{
        struct unix_domain *ud = container_of(dom, struct unix_domain, h);

        kfree(dom->name);
        kfree(ud);
}
/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define IP_HASHBITS     8
#define IP_HASHMAX      (1<<IP_HASHBITS)
#define IP_HASHMASK     (IP_HASHMAX-1)

struct ip_map {
        struct cache_head       h;
        char                    m_class[8]; /* e.g. "nfsd" */
        struct in6_addr         m_addr;
        struct unix_domain      *m_client;
        int                     m_add_change;
};
static struct cache_head *ip_table[IP_HASHMAX];
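/*
 * Entries are keyed by (m_class, m_addr) and map a client address to a
 * unix_domain.  IPv4 clients are stored as IPv4-mapped IPv6 addresses,
 * so one table serves both address families.  The table is populated by
 * downcalls on the "auth.unix.ip" cache channel (see ip_map_parse).
 */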
static void ip_map_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct ip_map *im = container_of(item, struct ip_map, h);

        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                auth_domain_put(&im->m_client->h);
        kfree(im);
}
#if IP_HASHBITS == 8
/* hash_long on a 64 bit machine is currently REALLY BAD for
 * IP addresses in reverse-endian (i.e. on a little-endian machine).
 * So use a trivial but reliable hash instead
 */
static inline int hash_ip(__be32 ip)
{
        int hash = (__force u32)ip ^ ((__force u32)ip>>16);
        return (hash ^ (hash>>8)) & 0xff;
}
#endif
static inline int hash_ip6(struct in6_addr ip)
{
        return (hash_ip(ip.s6_addr32[0]) ^
                hash_ip(ip.s6_addr32[1]) ^
                hash_ip(ip.s6_addr32[2]) ^
                hash_ip(ip.s6_addr32[3]));
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct ip_map *orig = container_of(corig, struct ip_map, h);
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        return strcmp(orig->m_class, new->m_class) == 0 &&
               ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        struct ip_map *item = container_of(citem, struct ip_map, h);

        strcpy(new->m_class, item->m_class);
        ipv6_addr_copy(&new->m_addr, &item->m_addr);
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        struct ip_map *item = container_of(citem, struct ip_map, h);

        kref_get(&item->m_client->h.ref);
        new->m_client = item->m_client;
        new->m_add_change = item->m_add_change;
}
static struct cache_head *ip_map_alloc(void)
{
        struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
        if (i)
                return &i->h;
        else
                return NULL;
}
static void ip_map_request(struct cache_detail *cd,
                           struct cache_head *h,
                           char **bpp, int *blen)
{
        char text_addr[40];
        struct ip_map *im = container_of(h, struct ip_map, h);

        if (ipv6_addr_v4mapped(&(im->m_addr))) {
                snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
        } else {
                snprintf(text_addr, 40, "%pI6", &im->m_addr);
        }
        qword_add(bpp, blen, im->m_class);
        qword_add(bpp, blen, text_addr);
        (*bpp)[-1] = '\n';
}

static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
}
static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
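/*
 * Downcalls on "auth.unix.ip" arrive as a single text line of the form
 *      class address expiry [domainname]
 * for example (illustrative values only):
 *      nfsd 192.0.2.53 1285729718 localhost.localdomain
 * An omitted domainname makes the entry NEGATIVE, i.e. the address is
 * known but not mapped to any export domain.
 */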
static int ip_map_parse(struct cache_detail *cd,
                        char *mesg, int mlen)
{
        /* class ipaddress [domainname] */
        /* should be safe just to use the start of the input buffer
         * for scratch: */
        char *buf = mesg;
        int len;
        char class[8];
        union {
                struct sockaddr         sa;
                struct sockaddr_in      s4;
                struct sockaddr_in6     s6;
        } address;
        struct sockaddr_in6 sin6;
        int err;

        struct ip_map *ipmp;
        struct auth_domain *dom;
        time_t expiry;

        if (mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        /* class */
        len = qword_get(&mesg, class, sizeof(class));
        if (len <= 0) return -EINVAL;

        /* ip address */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0) return -EINVAL;

        if (rpc_pton(buf, len, &address.sa, sizeof(address)) == 0)
                return -EINVAL;
        switch (address.sa.sa_family) {
        case AF_INET:
                /* Form a mapped IPv4 address in sin6 */
                memset(&sin6, 0, sizeof(sin6));
                sin6.sin6_family = AF_INET6;
                sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
                sin6.sin6_addr.s6_addr32[3] = address.s4.sin_addr.s_addr;
                break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                memcpy(&sin6, &address.s6, sizeof(sin6));
                break;
#endif
        default:
                return -EINVAL;
        }

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        /* domainname, or empty for NEGATIVE */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) return -EINVAL;

        if (len) {
                dom = unix_domain_find(buf);
                if (dom == NULL)
                        return -ENOENT;
        } else
                dom = NULL;

        /* IPv6 scope IDs are ignored for now */
        ipmp = ip_map_lookup(class, &sin6.sin6_addr);
        if (ipmp) {
                err = ip_map_update(ipmp,
                             container_of(dom, struct unix_domain, h),
                             expiry);
        } else
                err = -ENOMEM;

        if (dom)
                auth_domain_put(dom);

        cache_flush();
        return err;
}
static int ip_map_show(struct seq_file *m,
                       struct cache_detail *cd,
                       struct cache_head *h)
{
        struct ip_map *im;
        struct in6_addr addr;
        char *dom = "-no-domain-";

        if (h == NULL) {
                seq_puts(m, "#class IP domain\n");
                return 0;
        }
        im = container_of(h, struct ip_map, h);
        /* class addr domain */
        ipv6_addr_copy(&addr, &im->m_addr);

        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                dom = im->m_client->h.name;

        if (ipv6_addr_v4mapped(&addr)) {
                seq_printf(m, "%s %pI4 %s\n",
                        im->m_class, &addr.s6_addr32[3], dom);
        } else {
                seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
        }
        return 0;
}
struct cache_detail ip_map_cache = {
        .owner          = THIS_MODULE,
        .hash_size      = IP_HASHMAX,
        .hash_table     = ip_table,
        .name           = "auth.unix.ip",
        .cache_put      = ip_map_put,
        .cache_upcall   = ip_map_upcall,
        .cache_parse    = ip_map_parse,
        .cache_show     = ip_map_show,
        .match          = ip_map_match,
        .init           = ip_map_init,
        .update         = update,
        .alloc          = ip_map_alloc,
};
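/*
 * The lookup and update helpers below wrap the generic sunrpc cache
 * primitives; the hash key is hash_str() of the class XORed with
 * hash_ip6() of the (possibly v4-mapped) address.
 */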
static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
{
        struct ip_map ip;
        struct cache_head *ch;

        strcpy(ip.m_class, class);
        ipv6_addr_copy(&ip.m_addr, addr);
        ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
                                 hash_str(class, IP_HASHBITS) ^
                                 hash_ip6(*addr));

        if (ch)
                return container_of(ch, struct ip_map, h);
        else
                return NULL;
}
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry)
{
        struct ip_map ip;
        struct cache_head *ch;

        ip.m_client = udom;
        ip.h.flags = 0;
        if (!udom)
                set_bit(CACHE_NEGATIVE, &ip.h.flags);
        else {
                ip.m_add_change = udom->addr_changes;
                /* if this is from the legacy set_client system call,
                 * we need m_add_change to be one higher
                 */
                if (expiry == NEVER)
                        ip.m_add_change++;
        }
        ip.h.expiry_time = expiry;
        ch = sunrpc_cache_update(&ip_map_cache,
                                 &ip.h, &ipm->h,
                                 hash_str(ipm->m_class, IP_HASHBITS) ^
                                 hash_ip6(ipm->m_addr));
        if (!ch)
                return -ENOMEM;
        cache_put(ch, &ip_map_cache);
        return 0;
}
int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
{
        struct unix_domain *udom;
        struct ip_map *ipmp;

        if (dom->flavour != &svcauth_unix)
                return -EINVAL;
        udom = container_of(dom, struct unix_domain, h);
        ipmp = ip_map_lookup("nfsd", addr);

        if (ipmp)
                return ip_map_update(ipmp, udom, NEVER);
        else
                return -ENOMEM;
}
EXPORT_SYMBOL_GPL(auth_unix_add_addr);
int auth_unix_forget_old(struct auth_domain *dom)
{
        struct unix_domain *udom;

        if (dom->flavour != &svcauth_unix)
                return -EINVAL;
        udom = container_of(dom, struct unix_domain, h);
        udom->addr_changes++;
        return 0;
}
EXPORT_SYMBOL_GPL(auth_unix_forget_old);
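/*
 * addr_changes/m_add_change form a simple generation count: bumping
 * addr_changes in auth_unix_forget_old() makes every ip_map entry that
 * was recorded with an older count look stale, so auth_unix_lookup()
 * below turns such entries negative instead of returning the domain.
 */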
struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
{
        struct ip_map *ipm;
        struct auth_domain *rv;

        ipm = ip_map_lookup("nfsd", addr);

        if (!ipm)
                return NULL;
        if (cache_check(&ip_map_cache, &ipm->h, NULL))
                return NULL;

        if ((ipm->m_client->addr_changes - ipm->m_add_change) > 0) {
                if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
                        auth_domain_put(&ipm->m_client->h);
                rv = NULL;
        } else {
                rv = &ipm->m_client->h;
                kref_get(&rv->ref);
        }
        cache_put(&ipm->h, &ip_map_cache);
        return rv;
}
EXPORT_SYMBOL_GPL(auth_unix_lookup);
void svcauth_unix_purge(void)
{
        cache_purge(&ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);
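/*
 * Each svc_xprt may cache a reference to the ip_map entry for its peer
 * (XPT_CACHE_AUTH), sparing a cache lookup on every request from the
 * same connection.  The helpers below get and put that cached entry;
 * svcauth_unix_info_release() drops it when the transport goes away.
 */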
static inline struct ip_map *
ip_map_cached_get(struct svc_rqst *rqstp)
{
        struct ip_map *ipm = NULL;
        struct svc_xprt *xprt = rqstp->rq_xprt;

        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                ipm = xprt->xpt_auth_cache;
                if (ipm != NULL) {
                        if (!cache_valid(&ipm->h)) {
                                /*
                                 * The entry has been invalidated since it was
                                 * remembered, e.g. by a second mount from the
                                 * same IP address.
                                 */
                                xprt->xpt_auth_cache = NULL;
                                spin_unlock(&xprt->xpt_lock);
                                cache_put(&ipm->h, &ip_map_cache);
                                return NULL;
                        }
                        cache_get(&ipm->h);
                }
                spin_unlock(&xprt->xpt_lock);
        }
        return ipm;
}
static inline void
ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;

        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                if (xprt->xpt_auth_cache == NULL) {
                        /* newly cached, keep the reference */
                        xprt->xpt_auth_cache = ipm;
                        ipm = NULL;
                }
                spin_unlock(&xprt->xpt_lock);
        }
        if (ipm)
                cache_put(&ipm->h, &ip_map_cache);
}
void
svcauth_unix_info_release(void *info)
{
        struct ip_map *ipm = info;
        cache_put(&ipm->h, &ip_map_cache);
}
/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of 16
 */
#define GID_HASHBITS    8
#define GID_HASHMAX     (1<<GID_HASHBITS)
#define GID_HASHMASK    (GID_HASHMAX - 1)

struct unix_gid {
        struct cache_head       h;
        uid_t                   uid;
        struct group_info       *gi;
};
static struct cache_head *gid_table[GID_HASHMAX];
static void unix_gid_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct unix_gid *ug = container_of(item, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                put_group_info(ug->gi);
        kfree(ug);
}
static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct unix_gid *orig = container_of(corig, struct unix_gid, h);
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        return orig->uid == new->uid;
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);
        new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);

        get_group_info(item->gi);
        new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
        struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
        if (g)
                return &g->h;
        else
                return NULL;
}
static void unix_gid_request(struct cache_detail *cd,
                             struct cache_head *h,
                             char **bpp, int *blen)
{
        char tuid[20];
        struct unix_gid *ug = container_of(h, struct unix_gid, h);

        snprintf(tuid, 20, "%u", ug->uid);
        qword_add(bpp, blen, tuid);
        (*bpp)[-1] = '\n';
}

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
}
static struct unix_gid *unix_gid_lookup(uid_t uid);
extern struct cache_detail unix_gid_cache;
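/*
 * Downcalls on "auth.unix.gid" carry one line per uid:
 *      uid expiry Ngid gid0 gid1 ... gidN-1
 * for example (illustrative values only):
 *      1000 1285729718 3 100 1001 1002
 * i.e. uid 1000 belongs to 3 supplementary groups until the given
 * expiry time (seconds since the epoch).
 */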
static int unix_gid_parse(struct cache_detail *cd,
                          char *mesg, int mlen)
{
        /* uid expiry Ngid gid0 gid1 ... gidN-1 */
        int uid;
        int gids;
        int rv;
        int i;
        int err;
        time_t expiry;
        struct unix_gid ug, *ugp;

        if (mlen <= 0 || mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        rv = get_int(&mesg, &uid);
        if (rv)
                return -EINVAL;
        ug.uid = uid;

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        rv = get_int(&mesg, &gids);
        if (rv || gids < 0 || gids > 8192)
                return -EINVAL;

        ug.gi = groups_alloc(gids);
        if (!ug.gi)
                return -ENOMEM;

        for (i = 0 ; i < gids ; i++) {
                int gid;
                rv = get_int(&mesg, &gid);
                err = -EINVAL;
                if (rv)
                        goto out;
                GROUP_AT(ug.gi, i) = gid;
        }

        ugp = unix_gid_lookup(uid);
        if (ugp) {
                struct cache_head *ch;
                ug.h.flags = 0;
                ug.h.expiry_time = expiry;
                ch = sunrpc_cache_update(&unix_gid_cache,
                                         &ug.h, &ugp->h,
                                         hash_long(uid, GID_HASHBITS));
                if (!ch)
                        err = -ENOMEM;
                else {
                        err = 0;
                        cache_put(ch, &unix_gid_cache);
                }
        } else
                err = -ENOMEM;
 out:
        if (ug.gi)
                put_group_info(ug.gi);
        return err;
}
static int unix_gid_show(struct seq_file *m,
                         struct cache_detail *cd,
                         struct cache_head *h)
{
        struct unix_gid *ug;
        int i;
        int glen;

        if (h == NULL) {
                seq_puts(m, "#uid cnt: gids...\n");
                return 0;
        }
        ug = container_of(h, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                glen = ug->gi->ngroups;
        else
                glen = 0;

        seq_printf(m, "%u %d:", ug->uid, glen);
        for (i = 0; i < glen; i++)
                seq_printf(m, " %d", GROUP_AT(ug->gi, i));
        seq_printf(m, "\n");
        return 0;
}
struct cache_detail unix_gid_cache = {
        .owner          = THIS_MODULE,
        .hash_size      = GID_HASHMAX,
        .hash_table     = gid_table,
        .name           = "auth.unix.gid",
        .cache_put      = unix_gid_put,
        .cache_upcall   = unix_gid_upcall,
        .cache_parse    = unix_gid_parse,
        .cache_show     = unix_gid_show,
        .match          = unix_gid_match,
        .init           = unix_gid_init,
        .update         = unix_gid_update,
        .alloc          = unix_gid_alloc,
};
static struct unix_gid *unix_gid_lookup(uid_t uid)
{
        struct unix_gid ug;
        struct cache_head *ch;

        ug.uid = uid;
        ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h,
                                 hash_long(uid, GID_HASHBITS));
        if (ch)
                return container_of(ch, struct unix_gid, h);
        else
                return NULL;
}
static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
{
        struct unix_gid *ug;
        struct group_info *gi;
        int ret;

        ug = unix_gid_lookup(uid);
        if (!ug)
                return ERR_PTR(-EAGAIN);
        ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
        switch (ret) {
        case -ENOENT:
                return ERR_PTR(-ENOENT);
        case 0:
                gi = get_group_info(ug->gi);
                cache_put(&ug->h, &unix_gid_cache);
                return gi;
        default:
                return ERR_PTR(-EAGAIN);
        }
}
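/*
 * Map the client's source address to an export domain via the
 * auth.unix.ip cache, then try to replace the 16-entry AUTH_UNIX group
 * list with the fuller list from the auth.unix.gid cache.  A pending
 * upcall (-EAGAIN/-ETIMEDOUT) drops the request so the client retries
 * once userspace has filled the cache.
 */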
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
        struct sockaddr_in *sin;
        struct sockaddr_in6 *sin6, sin6_storage;
        struct ip_map *ipm;
        struct group_info *gi;
        struct svc_cred *cred = &rqstp->rq_cred;

        switch (rqstp->rq_addr.ss_family) {
        case AF_INET:
                sin = svc_addr_in(rqstp);
                sin6 = &sin6_storage;
                ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
                break;
        case AF_INET6:
                sin6 = svc_addr_in6(rqstp);
                break;
        default:
                BUG();
        }

        rqstp->rq_client = NULL;
        if (rqstp->rq_proc == 0)
                return SVC_OK;

        ipm = ip_map_cached_get(rqstp);
        if (ipm == NULL)
                ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
                                    &sin6->sin6_addr);

        if (ipm == NULL)
                return SVC_DENIED;

        switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
        default:
                BUG();
        case -EAGAIN:
        case -ETIMEDOUT:
                return SVC_DROP;
        case -ENOENT:
                return SVC_DENIED;
        case 0:
                rqstp->rq_client = &ipm->m_client->h;
                kref_get(&rqstp->rq_client->ref);
                ip_map_cached_put(rqstp, ipm);
                break;
        }

        gi = unix_gid_find(cred->cr_uid, rqstp);
        switch (PTR_ERR(gi)) {
        case -EAGAIN:
                return SVC_DROP;
        case -ENOENT:
                break;
        default:
                put_group_info(cred->cr_group_info);
                cred->cr_group_info = gi;
        }
        return SVC_OK;
}

EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;

        cred->cr_group_info = NULL;
        rqstp->rq_client = NULL;

        if (argv->iov_len < 3*4)
                return SVC_GARBAGE;

        if (svc_getu32(argv) != 0) {
                dprintk("svc: bad null cred\n");
                *authp = rpc_autherr_badcred;
                return SVC_DENIED;
        }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                dprintk("svc: bad null verf\n");
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Signal that mapping to nobody uid/gid is required */
        cred->cr_uid = (uid_t) -1;
        cred->cr_gid = (gid_t) -1;
        cred->cr_group_info = groups_alloc(0);
        if (cred->cr_group_info == NULL)
                return SVC_DROP; /* kmalloc failure - client must retry */

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_flavor = RPC_AUTH_NULL;
        return SVC_OK;
}
static int
svcauth_null_release(struct svc_rqst *rqstp)
{
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0; /* don't drop */
}
struct auth_ops svcauth_null = {
        .name           = "null",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_NULL,
        .accept         = svcauth_null_accept,
        .release        = svcauth_null_release,
        .set_client     = svcauth_unix_set_client,
};
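/*
 * An AUTH_UNIX (AUTH_SYS) credential on the wire carries an opaque
 * length, a time stamp, a machine name (at most 64 XDR words here),
 * uid, gid and up to 16 supplementary gids, followed by a verifier
 * that must be AUTH_NULL.  svcauth_unix_accept() decodes that layout.
 */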
static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;
        u32 slen, i;
        int len = argv->iov_len;

        cred->cr_group_info = NULL;
        rqstp->rq_client = NULL;

        if ((len -= 3*4) < 0)
                return SVC_GARBAGE;

        svc_getu32(argv);                       /* length */
        svc_getu32(argv);                       /* time stamp */
        slen = XDR_QUADLEN(svc_getnl(argv));    /* machname length */
        if (slen > 64 || (len -= (slen + 3)*4) < 0)
                goto badcred;
        argv->iov_base = (void*)((__be32*)argv->iov_base + slen);      /* skip machname */
        argv->iov_len -= slen*4;

        cred->cr_uid = svc_getnl(argv);         /* uid */
        cred->cr_gid = svc_getnl(argv);         /* gid */
        slen = svc_getnl(argv);                 /* gids length */
        if (slen > 16 || (len -= (slen + 2)*4) < 0)
                goto badcred;
        cred->cr_group_info = groups_alloc(slen);
        if (cred->cr_group_info == NULL)
                return SVC_DROP;
        for (i = 0; i < slen; i++)
                GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_flavor = RPC_AUTH_UNIX;
        return SVC_OK;

badcred:
        *authp = rpc_autherr_badcred;
        return SVC_DENIED;
}
static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
        /* Verifier (such as it is) is already in place.
         */
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0;
}
struct auth_ops svcauth_unix = {
        .name           = "unix",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_UNIX,
        .accept         = svcauth_unix_accept,
        .release        = svcauth_unix_release,
        .domain_release = svcauth_unix_domain_release,
        .set_client     = svcauth_unix_set_client,
};