net/sunrpc/svcauth_unix.c
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#define RPCDBG_FACILITY	RPCDBG_AUTH

#include <linux/sunrpc/clnt.h>

#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2). i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */
struct unix_domain {
	struct auth_domain	h;
	/* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}
struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_lookup(name, NULL);
	while(1) {
		if (rv) {
			if (new && rv != &new->h)
				svcauth_unix_domain_release(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		rv = auth_domain_lookup(name, &new->h);
	}
}
EXPORT_SYMBOL_GPL(unix_domain_find);
/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define	IP_HASHBITS	8
#define	IP_HASHMAX	(1<<IP_HASHBITS)

struct ip_map {
	struct cache_head	h;
	char			m_class[8]; /* e.g. "nfsd" */
	struct in6_addr		m_addr;
	struct unix_domain	*m_client;
};
static void ip_map_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct ip_map *im = container_of(item, struct ip_map, h);

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		auth_domain_put(&im->m_client->h);
	kfree(im);
}
#if IP_HASHBITS == 8
/* hash_long on a 64 bit machine is currently REALLY BAD for
 * IP addresses in reverse-endian (i.e. on a little-endian machine).
 * So use a trivial but reliable hash instead
 */
static inline int hash_ip(__be32 ip)
{
	int hash = (__force u32)ip ^ ((__force u32)ip>>16);
	return (hash ^ (hash>>8)) & 0xff;
}
#endif
static inline int hash_ip6(struct in6_addr ip)
{
	return (hash_ip(ip.s6_addr32[0]) ^
		hash_ip(ip.s6_addr32[1]) ^
		hash_ip(ip.s6_addr32[2]) ^
		hash_ip(ip.s6_addr32[3]));
}
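/*
 * Illustrative note: hash_ip() folds the 32-bit address down to
 * IP_HASHBITS (8) bits in two XOR steps, 32 -> 16 -> 8, so every byte of
 * the address contributes to the bucket index regardless of host
 * endianness; hash_ip6() simply XORs the per-word results for the four
 * 32-bit words of an IPv6 address.
 */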
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *orig = container_of(corig, struct ip_map, h);
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	return strcmp(orig->m_class, new->m_class) == 0 &&
	       ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	strcpy(new->m_class, item->m_class);
	ipv6_addr_copy(&new->m_addr, &item->m_addr);
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	kref_get(&item->m_client->h.ref);
	new->m_client = item->m_client;
}
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i)
		return &i->h;
	else
		return NULL;
}

static void ip_map_request(struct cache_detail *cd,
			   struct cache_head *h,
			   char **bpp, int *blen)
{
	char text_addr[40];
	struct ip_map *im = container_of(h, struct ip_map, h);

	if (ipv6_addr_v4mapped(&(im->m_addr))) {
		snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
	} else {
		snprintf(text_addr, 40, "%pI6", &im->m_addr);
	}
	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	(*bpp)[-1] = '\n';
}

static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
}
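/*
 * Illustrative example: for class "nfsd" and the v4-mapped address
 * 192.0.2.53, ip_map_request() writes the upcall line
 *
 *	nfsd 192.0.2.53
 *
 * to the auth.unix.ip cache channel; user space is expected to answer
 * with a matching downcall line, which ip_map_parse() below consumes.
 */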
static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);

static int ip_map_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* class ipaddress [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	char class[8];
	union {
		struct sockaddr		sa;
		struct sockaddr_in	s4;
		struct sockaddr_in6	s6;
	} address;
	struct sockaddr_in6 sin6;
	int err;

	struct ip_map *ipmp;
	struct auth_domain *dom;
	time_t expiry;

	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (rpc_pton(buf, len, &address.sa, sizeof(address)) == 0)
		return -EINVAL;
	switch (address.sa.sa_family) {
	case AF_INET:
		/* Form a mapped IPv4 address in sin6 */
		sin6.sin6_family = AF_INET6;
		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
				&sin6.sin6_addr);
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		memcpy(&sin6, &address.s6, sizeof(sin6));
		break;
#endif
	default:
		return -EINVAL;
	}

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	/* IPv6 scope IDs are ignored for now */
	ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
	if (ipmp) {
		err = __ip_map_update(cd, ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}
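/*
 * Illustrative downcall line for ip_map_parse() (values are examples
 * only):
 *
 *	nfsd 192.0.2.53 1382622742 myclient
 *
 * i.e. class, IP address, expiry time (seconds since the epoch) and an
 * optional domain name; omitting the domain name creates a NEGATIVE
 * entry for that address.
 */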
static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in6_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	ipv6_addr_copy(&addr, &im->m_addr);

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	if (ipv6_addr_v4mapped(&addr)) {
		seq_printf(m, "%s %pI4 %s\n",
			im->m_class, &addr.s6_addr32[3], dom);
	} else {
		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
	}
	return 0;
}
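/*
 * Example of the resulting content listing (illustrative values):
 *
 *	#class IP domain
 *	nfsd 192.0.2.53 myclient
 */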
static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
		struct in6_addr *addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ipv6_addr_copy(&ip.m_addr, addr);
	ch = sunrpc_cache_lookup(cd, &ip.h,
				 hash_str(class, IP_HASHBITS) ^
				 hash_ip6(*addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}

static inline struct ip_map *ip_map_lookup(struct net *net, char *class,
		struct in6_addr *addr)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	return __ip_map_lookup(sn->ip_map_cache, class, addr);
}

static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
		struct unix_domain *udom, time_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip6(ipm->m_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);
	return 0;
}

static inline int ip_map_update(struct net *net, struct ip_map *ipm,
		struct unix_domain *udom, time_t expiry)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}

void svcauth_unix_purge(void)
{
	struct net *net;

	for_each_net(net) {
		struct sunrpc_net *sn;

		sn = net_generic(net, sunrpc_net_id);
		cache_purge(sn->ip_map_cache);
	}
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);
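/*
 * Each transport caches at most one ip_map reference in xpt_auth_cache,
 * protected by xpt_lock: ip_map_cached_get() drops an entry that has
 * since been invalidated, and ip_map_cached_put() installs the entry if
 * the slot is empty, otherwise it releases the caller's reference.
 */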
static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
	struct ip_map *ipm = NULL;
	struct sunrpc_net *sn;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			if (!cache_valid(&ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				sn = net_generic(xprt->xpt_net, sunrpc_net_id);
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				cache_put(&ipm->h, sn->ip_map_cache);
				return NULL;
			}
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}

static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm) {
		struct sunrpc_net *sn;

		sn = net_generic(xprt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
	struct ip_map *ipm;

	ipm = xpt->xpt_auth_cache;
	if (ipm != NULL) {
		struct sunrpc_net *sn;

		sn = net_generic(xpt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}
/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of 16
 */
#define	GID_HASHBITS	8
#define	GID_HASHMAX	(1<<GID_HASHBITS)

struct unix_gid {
	struct cache_head	h;
	uid_t			uid;
	struct group_info	*gi;
};
static struct cache_head	*gid_table[GID_HASHMAX];

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree(ug);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return orig->uid == new->uid;
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}

static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", ug->uid);
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
}
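/*
 * Illustrative example: for uid 1000, unix_gid_request() writes the
 * single-field upcall line
 *
 *	1000
 *
 * to the auth.unix.gid cache channel.
 */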
static struct unix_gid *unix_gid_lookup(uid_t uid);
extern struct cache_detail unix_gid_cache;

static int unix_gid_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int uid;
	int gids;
	int rv;
	int i;
	int err;
	time_t expiry;
	struct unix_gid ug, *ugp;

	if (mlen <= 0 || mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &uid);
	if (rv)
		return -EINVAL;
	ug.uid = uid;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		GROUP_AT(ug.gi, i) = gid;
	}

	ugp = unix_gid_lookup(uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(&unix_gid_cache,
					 &ug.h, &ugp->h,
					 hash_long(uid, GID_HASHBITS));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, &unix_gid_cache);
		}
	} else
		err = -ENOMEM;
 out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}
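/*
 * Illustrative downcall line for unix_gid_parse() (values are examples
 * only), matching the "uid expiry Ngid gid0 gid1 ... gidN-1" format:
 *
 *	1000 1382622742 3 100 101 102
 *
 * i.e. uid 1000 expires at the given time and belongs to groups 100,
 * 101 and 102.
 */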
static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%u %d:", ug->uid, glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", GROUP_AT(ug->gi, i));
	seq_printf(m, "\n");
	return 0;
}
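/*
 * Example of the resulting content listing (illustrative values):
 *
 *	#uid cnt: gids...
 *	1000 3: 100 101 102
 */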
struct cache_detail unix_gid_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= GID_HASHMAX,
	.hash_table	= gid_table,
	.name		= "auth.unix.gid",
	.cache_put	= unix_gid_put,
	.cache_upcall	= unix_gid_upcall,
	.cache_parse	= unix_gid_parse,
	.cache_show	= unix_gid_show,
	.match		= unix_gid_match,
	.init		= unix_gid_init,
	.update		= unix_gid_update,
	.alloc		= unix_gid_alloc,
};

static struct unix_gid *unix_gid_lookup(uid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h,
				 hash_long(uid, GID_HASHBITS));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}

static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
{
	struct unix_gid *ug;
	struct group_info *gi;
	int ret;

	ug = unix_gid_lookup(uid);
	if (!ug)
		return ERR_PTR(-EAGAIN);
	ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
	switch (ret) {
	case -ENOENT:
		return ERR_PTR(-ENOENT);
	case -ETIMEDOUT:
		return ERR_PTR(-ESHUTDOWN);
	case 0:
		gi = get_group_info(ug->gi);
		cache_put(&ug->h, &unix_gid_cache);
		return gi;
	default:
		return ERR_PTR(-EAGAIN);
	}
}
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sin6_storage;
	struct ip_map *ipm;
	struct group_info *gi;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct net *net = xprt->xpt_net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	switch (rqstp->rq_addr.ss_family) {
	case AF_INET:
		sin = svc_addr_in(rqstp);
		sin6 = &sin6_storage;
		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
		break;
	case AF_INET6:
		sin6 = svc_addr_in6(rqstp);
		break;
	default:
		BUG();
	}

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		return SVC_OK;

	ipm = ip_map_cached_get(xprt);
	if (ipm == NULL)
		ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
				    &sin6->sin6_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
	default:
		BUG();
	case -ETIMEDOUT:
		return SVC_CLOSE;
	case -EAGAIN:
		return SVC_DROP;
	case -ENOENT:
		return SVC_DENIED;
	case 0:
		rqstp->rq_client = &ipm->m_client->h;
		kref_get(&rqstp->rq_client->ref);
		ip_map_cached_put(xprt, ipm);
		break;
	}

	gi = unix_gid_find(cred->cr_uid, rqstp);
	switch (PTR_ERR(gi)) {
	case -EAGAIN:
		return SVC_DROP;
	case -ESHUTDOWN:
		return SVC_CLOSE;
	case -ENOENT:
		break;
	default:
		put_group_info(cred->cr_group_info);
		cred->cr_group_info = gi;
	}
	return SVC_OK;
}

EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;

	cred->cr_group_info = NULL;
	rqstp->rq_client = NULL;

	if (argv->iov_len < 3*4)
		return SVC_GARBAGE;

	if (svc_getu32(argv) != 0) {
		dprintk("svc: bad null cred\n");
		*authp = rpc_autherr_badcred;
		return SVC_DENIED;
	}
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		dprintk("svc: bad null verf\n");
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = (uid_t) -1;
	cred->cr_gid = (gid_t) -1;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE; /* kmalloc failure - client must retry */

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
	.name		= "null",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,
	.accept 	= svcauth_null_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};
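/*
 * On the wire an AUTH_UNIX (AUTH_SYS) credential body consists of a
 * length word, a stamp, the machine name, the uid, the gid and up to 16
 * supplementary gids, followed by a verifier that must be AUTH_NULL;
 * svcauth_unix_accept() below parses exactly that layout.
 */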
static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;
	u32		slen, i;
	int		len   = argv->iov_len;

	cred->cr_group_info = NULL;
	rqstp->rq_client = NULL;

	if ((len -= 3*4) < 0)
		return SVC_GARBAGE;

	svc_getu32(argv);			/* length */
	svc_getu32(argv);			/* time stamp */
	slen = XDR_QUADLEN(svc_getnl(argv));	/* machname length */
	if (slen > 64 || (len -= (slen + 3)*4) < 0)
		goto badcred;
	argv->iov_base = (void*)((__be32*)argv->iov_base + slen);	/* skip machname */
	argv->iov_len -= slen*4;

	cred->cr_uid = svc_getnl(argv);		/* uid */
	cred->cr_gid = svc_getnl(argv);		/* gid */
	slen = svc_getnl(argv);			/* gids length */
	if (slen > 16 || (len -= (slen + 2)*4) < 0)
		goto badcred;
	cred->cr_group_info = groups_alloc(slen);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;
	for (i = 0; i < slen; i++)
		GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	*authp = rpc_autherr_badcred;
	return SVC_DENIED;
}
static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}


struct auth_ops svcauth_unix = {
	.name		= "unix",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_UNIX,
	.accept 	= svcauth_unix_accept,
	.release	= svcauth_unix_release,
	.domain_release	= svcauth_unix_domain_release,
	.set_client	= svcauth_unix_set_client,
};
int ip_map_cache_create(struct net *net)
{
	int err = -ENOMEM;
	struct cache_detail *cd;
	struct cache_head **tbl;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		goto err_cd;

	tbl = kzalloc(IP_HASHMAX * sizeof(struct cache_head *), GFP_KERNEL);
	if (tbl == NULL)
		goto err_tbl;

	cd->owner = THIS_MODULE,
	cd->hash_size = IP_HASHMAX,
	cd->hash_table = tbl,
	cd->name = "auth.unix.ip",
	cd->cache_put = ip_map_put,
	cd->cache_upcall = ip_map_upcall,
	cd->cache_parse = ip_map_parse,
	cd->cache_show = ip_map_show,
	cd->match = ip_map_match,
	cd->init = ip_map_init,
	cd->update = update,
	cd->alloc = ip_map_alloc,

	err = cache_register_net(cd, net);
	if (err)
		goto err_reg;

	sn->ip_map_cache = cd;
	return 0;

err_reg:
	kfree(tbl);
err_tbl:
	kfree(cd);
err_cd:
	return err;
}

void ip_map_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cache_purge(sn->ip_map_cache);
	cache_unregister_net(sn->ip_map_cache, net);
	kfree(sn->ip_map_cache->hash_table);
	kfree(sn->ip_map_cache);
}