#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#define RPCDBG_FACILITY RPCDBG_AUTH
/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2).  i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */

struct unix_domain {
        struct auth_domain      h;
        int                     addr_changes;
        /* other stuff later */
};
extern struct auth_ops svcauth_unix;
struct auth_domain *unix_domain_find(char *name)
{
        struct auth_domain *rv;
        struct unix_domain *new = NULL;

        rv = auth_domain_lookup(name, NULL);
        while (1) {
                if (rv) {
                        if (new && rv != &new->h)
                                auth_domain_put(&new->h);

                        if (rv->flavour != &svcauth_unix) {
                                auth_domain_put(rv);
                                return NULL;
                        }
                        return rv;
                }

                new = kmalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL)
                        return NULL;
                kref_init(&new->h.ref);
                new->h.name = kstrdup(name, GFP_KERNEL);
                if (new->h.name == NULL) {
                        kfree(new);
                        return NULL;
                }
                new->h.flavour = &svcauth_unix;
                new->addr_changes = 0;
                rv = auth_domain_lookup(name, &new->h);
        }
}
EXPORT_SYMBOL_GPL(unix_domain_find);
static void svcauth_unix_domain_release(struct auth_domain *dom)
{
        struct unix_domain *ud = container_of(dom, struct unix_domain, h);

        kfree(dom->name);
        kfree(ud);
}
/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define IP_HASHBITS     8
#define IP_HASHMAX      (1<<IP_HASHBITS)
#define IP_HASHMASK     (IP_HASHMAX-1)
struct ip_map {
        struct cache_head       h;
        char                    m_class[8]; /* e.g. "nfsd" */
        struct in6_addr         m_addr;
        struct unix_domain      *m_client;
        int                     m_add_change;
};
static struct cache_head        *ip_table[IP_HASHMAX];
static void ip_map_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct ip_map *im = container_of(item, struct ip_map, h);

        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                auth_domain_put(&im->m_client->h);
        kfree(im);
}
/* hash_long on a 64 bit machine is currently REALLY BAD for
 * IP addresses in reverse-endian (i.e. on a little-endian machine).
 * So use a trivial but reliable hash instead
 */
static inline int hash_ip(__be32 ip)
{
        int hash = (__force u32)ip ^ ((__force u32)ip>>16);
        return (hash ^ (hash>>8)) & 0xff;
}
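/*
 * Worked example (illustrative, not part of the original source): for a
 * value whose 32-bit pattern is 0xC0A80101, hash = 0xC0A80101 ^ 0x0000C0A8
 * = 0xC0A8C1A9, and the returned bucket is (0xA9 ^ 0xC1) & 0xff = 0x68;
 * i.e. all four bytes of the address are folded into the bucket index.
 */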
static inline int hash_ip6(struct in6_addr ip)
{
        return (hash_ip(ip.s6_addr32[0]) ^
                hash_ip(ip.s6_addr32[1]) ^
                hash_ip(ip.s6_addr32[2]) ^
                hash_ip(ip.s6_addr32[3]));
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct ip_map *orig = container_of(corig, struct ip_map, h);
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        return strcmp(orig->m_class, new->m_class) == 0
                && ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        struct ip_map *item = container_of(citem, struct ip_map, h);

        strcpy(new->m_class, item->m_class);
        ipv6_addr_copy(&new->m_addr, &item->m_addr);
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        struct ip_map *item = container_of(citem, struct ip_map, h);

        kref_get(&item->m_client->h.ref);
        new->m_client = item->m_client;
        new->m_add_change = item->m_add_change;
}
static struct cache_head *ip_map_alloc(void)
{
        struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
        if (i)
                return &i->h;
        else
                return NULL;
}
static void ip_map_request(struct cache_detail *cd,
                           struct cache_head *h,
                           char **bpp, int *blen)
{
        char text_addr[40];
        struct ip_map *im = container_of(h, struct ip_map, h);

        if (ipv6_addr_v4mapped(&(im->m_addr))) {
                snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
        } else {
                snprintf(text_addr, 40, "%pI6", &im->m_addr);
        }
        qword_add(bpp, blen, im->m_class);
        qword_add(bpp, blen, text_addr);
        (*bpp)[-1] = '\n';
}
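/*
 * Sketch of the resulting upcall line (illustrative values): for class
 * "nfsd" and a v4-mapped address, reading the channel yields something
 * like "nfsd 192.168.0.1" followed by a newline; user space is expected
 * to reply through ip_map_parse() below.
 */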
static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
static int ip_map_parse(struct cache_detail *cd,
                        char *mesg, int mlen)
{
        /* class ipaddress [domainname] */
        /* should be safe just to use the start of the input buffer
         * for scratch: */
        char *buf = mesg;
        int len;
        int b1, b2, b3, b4, b5, b6, b7, b8;
        char c;
        char class[8];
        struct in6_addr addr;
        int err;

        struct ip_map *ipmp;
        struct auth_domain *dom;
        time_t expiry;

        if (mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        /* class */
        len = qword_get(&mesg, class, sizeof(class));
        if (len <= 0) return -EINVAL;

        /* ip address */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0) return -EINVAL;

        if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) == 4) {
                addr.s6_addr32[0] = 0;
                addr.s6_addr32[1] = 0;
                addr.s6_addr32[2] = htonl(0xffff);
                addr.s6_addr32[3] =
                        htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
        } else if (sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x%c",
                        &b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) {
                addr.s6_addr16[0] = htons(b1);
                addr.s6_addr16[1] = htons(b2);
                addr.s6_addr16[2] = htons(b3);
                addr.s6_addr16[3] = htons(b4);
                addr.s6_addr16[4] = htons(b5);
                addr.s6_addr16[5] = htons(b6);
                addr.s6_addr16[6] = htons(b7);
                addr.s6_addr16[7] = htons(b8);
        } else
                return -EINVAL;

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        /* domainname, or empty for NEGATIVE */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) return -EINVAL;

        if (len) {
                dom = unix_domain_find(buf);
                if (dom == NULL)
                        return -ENOENT;
        } else
                dom = NULL;

        ipmp = ip_map_lookup(class, &addr);
        if (ipmp) {
                err = ip_map_update(ipmp,
                                    container_of(dom, struct unix_domain, h),
                                    expiry);
        } else
                err = -ENOMEM;

        if (dom)
                auth_domain_put(dom);

        cache_flush();
        return err;
}
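/*
 * Sketch of a downcall accepted above (illustrative values): writing
 * "nfsd 192.168.0.1 1600000000 example.com" to the auth.unix.ip channel
 * maps that address to the domain "example.com" until the given expiry
 * time; leaving the domain name out creates a negative entry.
 */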
static int ip_map_show(struct seq_file *m,
                       struct cache_detail *cd,
                       struct cache_head *h)
{
        struct ip_map *im;
        struct in6_addr addr;
        char *dom = "-no-domain-";

        if (h == NULL) {
                seq_puts(m, "#class IP domain\n");
                return 0;
        }
        im = container_of(h, struct ip_map, h);
        /* class addr domain */
        ipv6_addr_copy(&addr, &im->m_addr);

        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                dom = im->m_client->h.name;

        if (ipv6_addr_v4mapped(&addr)) {
                seq_printf(m, "%s %pI4 %s\n",
                           im->m_class, &addr.s6_addr32[3], dom);
        } else {
                seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
        }
        return 0;
}
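/*
 * Example content line as rendered above (illustrative values):
 * "nfsd 192.168.0.1 example.com", with "-no-domain-" in place of the
 * domain while the entry is negative or not yet valid.
 */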
struct cache_detail ip_map_cache = {
        .owner          = THIS_MODULE,
        .hash_size      = IP_HASHMAX,
        .hash_table     = ip_table,
        .name           = "auth.unix.ip",
        .cache_put      = ip_map_put,
        .cache_request  = ip_map_request,
        .cache_parse    = ip_map_parse,
        .cache_show     = ip_map_show,
        .match          = ip_map_match,
        .init           = ip_map_init,
        .update         = update,
        .alloc          = ip_map_alloc,
};
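/*
 * Registration is not done in this file; the sunrpc module init is
 * assumed to call cache_register(&ip_map_cache), which exposes this
 * cache under /proc/net/rpc/auth.unix.ip.
 */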
static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
{
        struct ip_map ip;
        struct cache_head *ch;

        strcpy(ip.m_class, class);
        ipv6_addr_copy(&ip.m_addr, addr);
        ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
                                 hash_str(class, IP_HASHBITS) ^
                                 hash_ip6(*addr));

        if (ch)
                return container_of(ch, struct ip_map, h);
        else
                return NULL;
}
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry)
{
        struct ip_map ip;
        struct cache_head *ch;

        ip.m_client = udom;
        ip.h.flags = 0;
        if (!udom)
                set_bit(CACHE_NEGATIVE, &ip.h.flags);
        else {
                ip.m_add_change = udom->addr_changes;
                /* if this is from the legacy set_client system call,
                 * we need m_add_change to be one higher
                 */
                if (expiry == NEVER)
                        ip.m_add_change++;
        }
        ip.h.expiry_time = expiry;
        ch = sunrpc_cache_update(&ip_map_cache,
                                 &ip.h, &ipm->h,
                                 hash_str(ipm->m_class, IP_HASHBITS) ^
                                 hash_ip6(ipm->m_addr));
        if (!ch)
                return -ENOMEM;
        cache_put(ch, &ip_map_cache);
        return 0;
}
int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
{
        struct unix_domain *udom;
        struct ip_map *ipmp;

        if (dom->flavour != &svcauth_unix)
                return -EINVAL;
        udom = container_of(dom, struct unix_domain, h);
        ipmp = ip_map_lookup("nfsd", addr);

        if (ipmp)
                return ip_map_update(ipmp, udom, NEVER);
        else
                return -ENOMEM;
}
EXPORT_SYMBOL_GPL(auth_unix_add_addr);
int auth_unix_forget_old(struct auth_domain *dom)
{
        struct unix_domain *udom;

        if (dom->flavour != &svcauth_unix)
                return -EINVAL;
        udom = container_of(dom, struct unix_domain, h);
        udom->addr_changes++;
        return 0;
}
EXPORT_SYMBOL_GPL(auth_unix_forget_old);
struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
{
        struct ip_map *ipm;
        struct auth_domain *rv;

        ipm = ip_map_lookup("nfsd", addr);

        if (!ipm)
                return NULL;
        if (cache_check(&ip_map_cache, &ipm->h, NULL))
                return NULL;

        if ((ipm->m_client->addr_changes - ipm->m_add_change) > 0) {
                if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
                        auth_domain_put(&ipm->m_client->h);
                rv = NULL;
        } else {
                rv = &ipm->m_client->h;
                kref_get(&rv->ref);
        }
        cache_put(&ipm->h, &ip_map_cache);
        return rv;
}
EXPORT_SYMBOL_GPL(auth_unix_lookup);
void svcauth_unix_purge(void)
{
        cache_purge(&ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);
static inline struct ip_map *
ip_map_cached_get(struct svc_rqst *rqstp)
{
        struct ip_map *ipm = NULL;
        struct svc_xprt *xprt = rqstp->rq_xprt;

        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                ipm = xprt->xpt_auth_cache;
                if (ipm != NULL) {
                        if (!cache_valid(&ipm->h)) {
                                /*
                                 * The entry has been invalidated since it was
                                 * remembered, e.g. by a second mount from the
                                 * same IP address.
                                 */
                                xprt->xpt_auth_cache = NULL;
                                spin_unlock(&xprt->xpt_lock);
                                cache_put(&ipm->h, &ip_map_cache);
                                return NULL;
                        }
                        cache_get(&ipm->h);
                }
                spin_unlock(&xprt->xpt_lock);
        }
        return ipm;
}
static inline void
ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;

        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                if (xprt->xpt_auth_cache == NULL) {
                        /* newly cached, keep the reference */
                        xprt->xpt_auth_cache = ipm;
                        ipm = NULL;
                }
                spin_unlock(&xprt->xpt_lock);
        }
        if (ipm)
                cache_put(&ipm->h, &ip_map_cache);
}
void
svcauth_unix_info_release(void *info)
{
        struct ip_map *ipm = info;
        cache_put(&ipm->h, &ip_map_cache);
}
/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of 16
 */
#define GID_HASHBITS    8
#define GID_HASHMAX     (1<<GID_HASHBITS)
#define GID_HASHMASK    (GID_HASHMAX - 1)
struct unix_gid {
        struct cache_head       h;
        uid_t                   uid;
        struct group_info       *gi;
};
static struct cache_head        *gid_table[GID_HASHMAX];
static void unix_gid_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct unix_gid *ug = container_of(item, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                put_group_info(ug->gi);
        kfree(ug);
}
static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct unix_gid *orig = container_of(corig, struct unix_gid, h);
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        return orig->uid == new->uid;
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);
        new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);

        get_group_info(item->gi);
        new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
        struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
        if (g)
                return &g->h;
        else
                return NULL;
}
static void unix_gid_request(struct cache_detail *cd,
                             struct cache_head *h,
                             char **bpp, int *blen)
{
        char tuid[20];
        struct unix_gid *ug = container_of(h, struct unix_gid, h);

        snprintf(tuid, 20, "%u", ug->uid);
        qword_add(bpp, blen, tuid);
        (*bpp)[-1] = '\n';
}
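/* The upcall line produced here is just the decimal uid, e.g. "1000\n". */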
static struct unix_gid *unix_gid_lookup(uid_t uid);
extern struct cache_detail unix_gid_cache;
static int unix_gid_parse(struct cache_detail *cd,
                          char *mesg, int mlen)
{
        /* uid expiry Ngid gid0 gid1 ... gidN-1 */
        int uid;
        int gids;
        int rv;
        int i;
        int err;
        time_t expiry;
        struct unix_gid ug, *ugp;

        if (mlen <= 0 || mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = '\0';

        rv = get_int(&mesg, &uid);
        if (rv)
                return -EINVAL;
        ug.uid = uid;

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        rv = get_int(&mesg, &gids);
        if (rv || gids < 0 || gids > 8192)
                return -EINVAL;

        ug.gi = groups_alloc(gids);
        if (!ug.gi)
                return -ENOMEM;

        for (i = 0; i < gids; i++) {
                int gid;
                rv = get_int(&mesg, &gid);
                err = -EINVAL;
                if (rv)
                        goto out;
                GROUP_AT(ug.gi, i) = gid;
        }

        ugp = unix_gid_lookup(uid);
        if (ugp) {
                struct cache_head *ch;
                ug.h.flags = 0;
                ug.h.expiry_time = expiry;
                ch = sunrpc_cache_update(&unix_gid_cache,
                                         &ug.h, &ugp->h,
                                         hash_long(uid, GID_HASHBITS));
                if (!ch)
                        err = -ENOMEM;
                else {
                        err = 0;
                        cache_put(ch, &unix_gid_cache);
                }
        } else
                err = -ENOMEM;
out:
        if (ug.gi)
                put_group_info(ug.gi);
        return err;
}
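/*
 * Sketch of a downcall accepted above (illustrative values):
 * "1000 1600000000 3 100 101 102" maps uid 1000 to the three
 * supplementary gids 100, 101 and 102 until the given expiry time.
 */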
static int unix_gid_show(struct seq_file *m,
                         struct cache_detail *cd,
                         struct cache_head *h)
{
        struct unix_gid *ug;
        int i;
        int glen;

        if (h == NULL) {
                seq_puts(m, "#uid cnt: gids...\n");
                return 0;
        }
        ug = container_of(h, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                glen = ug->gi->ngroups;
        else
                glen = 0;

        seq_printf(m, "%d %d:", ug->uid, glen);
        for (i = 0; i < glen; i++)
                seq_printf(m, " %d", GROUP_AT(ug->gi, i));
        seq_printf(m, "\n");
        return 0;
}
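/* Rendered as e.g. "1000 3: 100 101 102" for a valid entry, "1000 0:" otherwise. */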
struct cache_detail unix_gid_cache = {
        .owner          = THIS_MODULE,
        .hash_size      = GID_HASHMAX,
        .hash_table     = gid_table,
        .name           = "auth.unix.gid",
        .cache_put      = unix_gid_put,
        .cache_request  = unix_gid_request,
        .cache_parse    = unix_gid_parse,
        .cache_show     = unix_gid_show,
        .match          = unix_gid_match,
        .init           = unix_gid_init,
        .update         = unix_gid_update,
        .alloc          = unix_gid_alloc,
};
static struct unix_gid *unix_gid_lookup(uid_t uid)
{
        struct unix_gid ug;
        struct cache_head *ch;

        ug.uid = uid;
        ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h,
                                 hash_long(uid, GID_HASHBITS));
        if (ch)
                return container_of(ch, struct unix_gid, h);
        else
                return NULL;
}
static int unix_gid_find(uid_t uid, struct group_info **gip,
                         struct svc_rqst *rqstp)
{
        struct unix_gid *ug = unix_gid_lookup(uid);
        if (!ug)
                return -EAGAIN;
        switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
        case -ENOENT:
                *gip = NULL;
                return 0;
        case 0:
                *gip = ug->gi;
                get_group_info(*gip);
                return 0;
        default:
                return -EAGAIN;
        }
}
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
        struct sockaddr_in *sin;
        struct sockaddr_in6 *sin6, sin6_storage;
        struct ip_map *ipm;

        switch (rqstp->rq_addr.ss_family) {
        case AF_INET:
                sin = svc_addr_in(rqstp);
                sin6 = &sin6_storage;
                ipv6_addr_set(&sin6->sin6_addr, 0, 0,
                              htonl(0x0000FFFF), sin->sin_addr.s_addr);
                break;
        case AF_INET6:
                sin6 = svc_addr_in6(rqstp);
                break;
        default:
                BUG();
        }

        rqstp->rq_client = NULL;
        if (rqstp->rq_proc == 0)
                return SVC_OK;

        ipm = ip_map_cached_get(rqstp);
        if (ipm == NULL)
                ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
                                    &sin6->sin6_addr);

        if (ipm == NULL)
                return SVC_DENIED;

        switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
        default:
                BUG();
        case -EAGAIN:
        case -ETIMEDOUT:
                return SVC_DROP;
        case -ENOENT:
                return SVC_DENIED;
        case 0:
                rqstp->rq_client = &ipm->m_client->h;
                kref_get(&rqstp->rq_client->ref);
                ip_map_cached_put(rqstp, ipm);
                break;
        }
        return SVC_OK;
}

EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec     *argv = &rqstp->rq_arg.head[0];
        struct kvec     *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;

        cred->cr_group_info = NULL;
        rqstp->rq_client = NULL;

        if (argv->iov_len < 3*4)
                return SVC_GARBAGE;

        if (svc_getu32(argv) != 0) {
                dprintk("svc: bad null cred\n");
                *authp = rpc_autherr_badcred;
                return SVC_DENIED;
        }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                dprintk("svc: bad null verf\n");
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Signal that mapping to nobody uid/gid is required */
        cred->cr_uid = (uid_t) -1;
        cred->cr_gid = (gid_t) -1;
        cred->cr_group_info = groups_alloc(0);
        if (cred->cr_group_info == NULL)
                return SVC_DROP; /* kmalloc failure - client must retry */

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_flavor = RPC_AUTH_NULL;
        return SVC_OK;
}
static int
svcauth_null_release(struct svc_rqst *rqstp)
{
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0; /* don't drop */
}
struct auth_ops svcauth_null = {
        .name           = "null",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_NULL,
        .accept         = svcauth_null_accept,
        .release        = svcauth_null_release,
        .set_client     = svcauth_unix_set_client,
};
static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec     *argv = &rqstp->rq_arg.head[0];
        struct kvec     *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;
        u32             slen, i;
        int             len   = argv->iov_len;

        cred->cr_group_info = NULL;
        rqstp->rq_client = NULL;

        if ((len -= 3*4) < 0)
                return SVC_GARBAGE;

        svc_getu32(argv);                       /* length */
        svc_getu32(argv);                       /* time stamp */
        slen = XDR_QUADLEN(svc_getnl(argv));    /* machname length */
        if (slen > 64 || (len -= (slen + 3)*4) < 0)
                goto badcred;
        argv->iov_base = (void*)((__be32*)argv->iov_base + slen);      /* skip machname */
        argv->iov_len -= slen*4;

        cred->cr_uid = svc_getnl(argv);         /* uid */
        cred->cr_gid = svc_getnl(argv);         /* gid */
        slen = svc_getnl(argv);                 /* gids length */
        if (slen > 16 || (len -= (slen + 2)*4) < 0)
                goto badcred;
        if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp)
            == -EAGAIN)
                return SVC_DROP;
        if (cred->cr_group_info == NULL) {
                cred->cr_group_info = groups_alloc(slen);
                if (cred->cr_group_info == NULL)
                        return SVC_DROP;
                for (i = 0; i < slen; i++)
                        GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
        } else {
                for (i = 0; i < slen; i++)
                        svc_getnl(argv);
        }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_flavor = RPC_AUTH_UNIX;
        return SVC_OK;

badcred:
        *authp = rpc_autherr_badcred;
        return SVC_DENIED;
}
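/*
 * For reference: after the length word, the AUTH_UNIX (AUTH_SYS) credential
 * body parsed above consists of a stamp, the machine name (limited here to
 * 64 XDR words), uid, gid, and up to 16 supplementary gids; the verifier
 * that follows must be AUTH_NULL.
 */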
static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
        /* Verifier (such as it is) is already in place.
         */
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0;
}
struct auth_ops svcauth_unix = {
        .name           = "unix",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_UNIX,
        .accept         = svcauth_unix_accept,
        .release        = svcauth_unix_release,
        .domain_release = svcauth_unix_domain_release,
        .set_client     = svcauth_unix_set_client,
};