4 * Copyright (C) International Business Machines Corp., 2007,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Contains the routines for mapping CIFS/NTFS ACLs
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/keyctl.h>
28 #include <linux/key-type.h>
29 #include <keys/user-type.h>
33 #include "cifsproto.h"
34 #include "cifs_debug.h"
36 /* security id for everyone/world system group */
37 static const struct cifs_sid sid_everyone
= {
38 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
39 /* security id for Authenticated Users system group */
40 static const struct cifs_sid sid_authusers
= {
41 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
43 static const struct cifs_sid sid_user
= {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
/* Override credentials used while upcalling to userspace for SID<->id
 * mapping; presumably initialised by init_cifs_idmap() — confirm. */
const struct cred *root_cred;
48 shrink_idmap_tree(struct rb_root
*root
, int nr_to_scan
, int *nr_rem
,
53 struct cifs_sid_id
*psidid
;
55 node
= rb_first(root
);
59 psidid
= rb_entry(tmp
, struct cifs_sid_id
, rbnode
);
60 if (nr_to_scan
== 0 || *nr_del
== nr_to_scan
)
63 if (time_after(jiffies
, psidid
->time
+ SID_MAP_EXPIRE
)
64 && psidid
->refcount
== 0) {
74 * Run idmap cache shrinker.
77 cifs_idmap_shrinker(struct shrinker
*shrink
, struct shrink_control
*sc
)
79 int nr_to_scan
= sc
->nr_to_scan
;
85 spin_lock(&siduidlock
);
86 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
87 spin_unlock(&siduidlock
);
90 spin_lock(&sidgidlock
);
91 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
92 spin_unlock(&sidgidlock
);
95 spin_lock(&uidsidlock
);
96 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
97 spin_unlock(&uidsidlock
);
100 spin_lock(&gidsidlock
);
101 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
102 spin_unlock(&gidsidlock
);
108 sid_rb_insert(struct rb_root
*root
, unsigned long cid
,
109 struct cifs_sid_id
**psidid
, char *typestr
)
112 struct rb_node
*node
= root
->rb_node
;
113 struct rb_node
*parent
= NULL
;
114 struct rb_node
**linkto
= &(root
->rb_node
);
115 struct cifs_sid_id
*lsidid
;
118 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
120 if (cid
> lsidid
->id
) {
121 linkto
= &(node
->rb_left
);
122 node
= node
->rb_left
;
124 if (cid
< lsidid
->id
) {
125 linkto
= &(node
->rb_right
);
126 node
= node
->rb_right
;
131 (*psidid
)->time
= jiffies
- (SID_MAP_RETRY
+ 1);
132 (*psidid
)->refcount
= 0;
134 sprintf((*psidid
)->sidstr
, "%s", typestr
);
135 strptr
= (*psidid
)->sidstr
+ strlen((*psidid
)->sidstr
);
136 sprintf(strptr
, "%ld", cid
);
138 clear_bit(SID_ID_PENDING
, &(*psidid
)->state
);
139 clear_bit(SID_ID_MAPPED
, &(*psidid
)->state
);
141 rb_link_node(&(*psidid
)->rbnode
, parent
, linkto
);
142 rb_insert_color(&(*psidid
)->rbnode
, root
);
145 static struct cifs_sid_id
*
146 sid_rb_search(struct rb_root
*root
, unsigned long cid
)
148 struct rb_node
*node
= root
->rb_node
;
149 struct cifs_sid_id
*lsidid
;
152 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
153 if (cid
> lsidid
->id
)
154 node
= node
->rb_left
;
155 else if (cid
< lsidid
->id
)
156 node
= node
->rb_right
;
157 else /* node found */
164 static struct shrinker cifs_shrinker
= {
165 .shrink
= cifs_idmap_shrinker
,
166 .seeks
= DEFAULT_SEEKS
,
170 cifs_idmap_key_instantiate(struct key
*key
, struct key_preparsed_payload
*prep
)
174 payload
= kmalloc(prep
->datalen
, GFP_KERNEL
);
178 memcpy(payload
, prep
->data
, prep
->datalen
);
179 key
->payload
.data
= payload
;
180 key
->datalen
= prep
->datalen
;
185 cifs_idmap_key_destroy(struct key
*key
)
187 kfree(key
->payload
.data
);
190 struct key_type cifs_idmap_key_type
= {
191 .name
= "cifs.idmap",
192 .instantiate
= cifs_idmap_key_instantiate
,
193 .destroy
= cifs_idmap_key_destroy
,
194 .describe
= user_describe
,
199 sid_to_str(struct cifs_sid
*sidptr
, char *sidstr
)
207 sprintf(strptr
, "%s", "S");
208 strptr
= sidstr
+ strlen(sidstr
);
210 sprintf(strptr
, "-%d", sidptr
->revision
);
211 strptr
= sidstr
+ strlen(sidstr
);
213 for (i
= 0; i
< 6; ++i
) {
214 if (sidptr
->authority
[i
]) {
215 sprintf(strptr
, "-%d", sidptr
->authority
[i
]);
216 strptr
= sidstr
+ strlen(sidstr
);
220 for (i
= 0; i
< sidptr
->num_subauth
; ++i
) {
221 saval
= le32_to_cpu(sidptr
->sub_auth
[i
]);
222 sprintf(strptr
, "-%ld", saval
);
223 strptr
= sidstr
+ strlen(sidstr
);
228 id_rb_insert(struct rb_root
*root
, struct cifs_sid
*sidptr
,
229 struct cifs_sid_id
**psidid
, char *typestr
)
233 struct rb_node
*node
= root
->rb_node
;
234 struct rb_node
*parent
= NULL
;
235 struct rb_node
**linkto
= &(root
->rb_node
);
236 struct cifs_sid_id
*lsidid
;
239 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
241 rc
= compare_sids(sidptr
, &((lsidid
)->sid
));
243 linkto
= &(node
->rb_left
);
244 node
= node
->rb_left
;
246 linkto
= &(node
->rb_right
);
247 node
= node
->rb_right
;
251 memcpy(&(*psidid
)->sid
, sidptr
, sizeof(struct cifs_sid
));
252 (*psidid
)->time
= jiffies
- (SID_MAP_RETRY
+ 1);
253 (*psidid
)->refcount
= 0;
255 sprintf((*psidid
)->sidstr
, "%s", typestr
);
256 strptr
= (*psidid
)->sidstr
+ strlen((*psidid
)->sidstr
);
257 sid_to_str(&(*psidid
)->sid
, strptr
);
259 clear_bit(SID_ID_PENDING
, &(*psidid
)->state
);
260 clear_bit(SID_ID_MAPPED
, &(*psidid
)->state
);
262 rb_link_node(&(*psidid
)->rbnode
, parent
, linkto
);
263 rb_insert_color(&(*psidid
)->rbnode
, root
);
266 static struct cifs_sid_id
*
267 id_rb_search(struct rb_root
*root
, struct cifs_sid
*sidptr
)
270 struct rb_node
*node
= root
->rb_node
;
271 struct cifs_sid_id
*lsidid
;
274 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
275 rc
= compare_sids(sidptr
, &((lsidid
)->sid
));
277 node
= node
->rb_left
;
279 node
= node
->rb_right
;
280 } else /* node found */
288 sidid_pending_wait(void *unused
)
291 return signal_pending(current
) ? -ERESTARTSYS
: 0;
295 id_to_sid(unsigned long cid
, uint sidtype
, struct cifs_sid
*ssid
)
299 const struct cred
*saved_cred
;
300 struct cifs_sid
*lsid
;
301 struct cifs_sid_id
*psidid
, *npsidid
;
302 struct rb_root
*cidtree
;
305 if (sidtype
== SIDOWNER
) {
306 cidlock
= &siduidlock
;
308 } else if (sidtype
== SIDGROUP
) {
309 cidlock
= &sidgidlock
;
315 psidid
= sid_rb_search(cidtree
, cid
);
317 if (!psidid
) { /* node does not exist, allocate one & attempt adding */
318 spin_unlock(cidlock
);
319 npsidid
= kzalloc(sizeof(struct cifs_sid_id
), GFP_KERNEL
);
323 npsidid
->sidstr
= kmalloc(SIDLEN
, GFP_KERNEL
);
324 if (!npsidid
->sidstr
) {
330 psidid
= sid_rb_search(cidtree
, cid
);
331 if (psidid
) { /* node happened to get inserted meanwhile */
333 spin_unlock(cidlock
);
334 kfree(npsidid
->sidstr
);
338 sid_rb_insert(cidtree
, cid
, &psidid
,
339 sidtype
== SIDOWNER
? "oi:" : "gi:");
341 spin_unlock(cidlock
);
345 spin_unlock(cidlock
);
349 * If we are here, it is safe to access psidid and its fields
350 * since a reference was taken earlier while holding the spinlock.
351 * A reference on the node is put without holding the spinlock
352 * and it is OK to do so in this case, shrinker will not erase
353 * this node until all references are put and we do not access
354 * any fields of the node after a reference is put .
356 if (test_bit(SID_ID_MAPPED
, &psidid
->state
)) {
357 memcpy(ssid
, &psidid
->sid
, sizeof(struct cifs_sid
));
358 psidid
->time
= jiffies
; /* update ts for accessing */
362 if (time_after(psidid
->time
+ SID_MAP_RETRY
, jiffies
)) {
367 if (!test_and_set_bit(SID_ID_PENDING
, &psidid
->state
)) {
368 saved_cred
= override_creds(root_cred
);
369 sidkey
= request_key(&cifs_idmap_key_type
, psidid
->sidstr
, "");
370 if (IS_ERR(sidkey
)) {
372 cFYI(1, "%s: Can't map and id to a SID", __func__
);
374 lsid
= (struct cifs_sid
*)sidkey
->payload
.data
;
375 memcpy(&psidid
->sid
, lsid
,
376 sidkey
->datalen
< sizeof(struct cifs_sid
) ?
377 sidkey
->datalen
: sizeof(struct cifs_sid
));
378 memcpy(ssid
, &psidid
->sid
,
379 sidkey
->datalen
< sizeof(struct cifs_sid
) ?
380 sidkey
->datalen
: sizeof(struct cifs_sid
));
381 set_bit(SID_ID_MAPPED
, &psidid
->state
);
383 kfree(psidid
->sidstr
);
385 psidid
->time
= jiffies
; /* update ts for accessing */
386 revert_creds(saved_cred
);
387 clear_bit(SID_ID_PENDING
, &psidid
->state
);
388 wake_up_bit(&psidid
->state
, SID_ID_PENDING
);
390 rc
= wait_on_bit(&psidid
->state
, SID_ID_PENDING
,
391 sidid_pending_wait
, TASK_INTERRUPTIBLE
);
393 cFYI(1, "%s: sidid_pending_wait interrupted %d",
398 if (test_bit(SID_ID_MAPPED
, &psidid
->state
))
399 memcpy(ssid
, &psidid
->sid
, sizeof(struct cifs_sid
));
409 sid_to_id(struct cifs_sb_info
*cifs_sb
, struct cifs_sid
*psid
,
410 struct cifs_fattr
*fattr
, uint sidtype
)
415 const struct cred
*saved_cred
;
416 struct cifs_sid_id
*psidid
, *npsidid
;
417 struct rb_root
*cidtree
;
420 if (sidtype
== SIDOWNER
) {
421 cid
= cifs_sb
->mnt_uid
; /* default uid, in case upcall fails */
422 cidlock
= &siduidlock
;
424 } else if (sidtype
== SIDGROUP
) {
425 cid
= cifs_sb
->mnt_gid
; /* default gid, in case upcall fails */
426 cidlock
= &sidgidlock
;
432 psidid
= id_rb_search(cidtree
, psid
);
434 if (!psidid
) { /* node does not exist, allocate one & attempt adding */
435 spin_unlock(cidlock
);
436 npsidid
= kzalloc(sizeof(struct cifs_sid_id
), GFP_KERNEL
);
440 npsidid
->sidstr
= kmalloc(SIDLEN
, GFP_KERNEL
);
441 if (!npsidid
->sidstr
) {
447 psidid
= id_rb_search(cidtree
, psid
);
448 if (psidid
) { /* node happened to get inserted meanwhile */
450 spin_unlock(cidlock
);
451 kfree(npsidid
->sidstr
);
455 id_rb_insert(cidtree
, psid
, &psidid
,
456 sidtype
== SIDOWNER
? "os:" : "gs:");
458 spin_unlock(cidlock
);
462 spin_unlock(cidlock
);
466 * If we are here, it is safe to access psidid and its fields
467 * since a reference was taken earlier while holding the spinlock.
468 * A reference on the node is put without holding the spinlock
469 * and it is OK to do so in this case, shrinker will not erase
470 * this node until all references are put and we do not access
471 * any fields of the node after a reference is put .
473 if (test_bit(SID_ID_MAPPED
, &psidid
->state
)) {
475 psidid
->time
= jiffies
; /* update ts for accessing */
479 if (time_after(psidid
->time
+ SID_MAP_RETRY
, jiffies
))
482 if (!test_and_set_bit(SID_ID_PENDING
, &psidid
->state
)) {
483 saved_cred
= override_creds(root_cred
);
484 idkey
= request_key(&cifs_idmap_key_type
, psidid
->sidstr
, "");
486 cFYI(1, "%s: Can't map SID to an id", __func__
);
488 cid
= *(unsigned long *)idkey
->payload
.value
;
490 set_bit(SID_ID_MAPPED
, &psidid
->state
);
492 kfree(psidid
->sidstr
);
494 revert_creds(saved_cred
);
495 psidid
->time
= jiffies
; /* update ts for accessing */
496 clear_bit(SID_ID_PENDING
, &psidid
->state
);
497 wake_up_bit(&psidid
->state
, SID_ID_PENDING
);
499 rc
= wait_on_bit(&psidid
->state
, SID_ID_PENDING
,
500 sidid_pending_wait
, TASK_INTERRUPTIBLE
);
502 cFYI(1, "%s: sidid_pending_wait interrupted %d",
504 --psidid
->refcount
; /* decremented without spinlock */
507 if (test_bit(SID_ID_MAPPED
, &psidid
->state
))
512 --psidid
->refcount
; /* decremented without spinlock */
513 if (sidtype
== SIDOWNER
)
522 init_cifs_idmap(void)
528 cFYI(1, "Registering the %s key type", cifs_idmap_key_type
.name
);
530 /* create an override credential set with a special thread keyring in
531 * which requests are cached
533 * this is used to prevent malicious redirections from being installed
536 cred
= prepare_kernel_cred(NULL
);
540 keyring
= key_alloc(&key_type_keyring
, ".cifs_idmap", 0, 0, cred
,
541 (KEY_POS_ALL
& ~KEY_POS_SETATTR
) |
542 KEY_USR_VIEW
| KEY_USR_READ
,
543 KEY_ALLOC_NOT_IN_QUOTA
);
544 if (IS_ERR(keyring
)) {
545 ret
= PTR_ERR(keyring
);
546 goto failed_put_cred
;
549 ret
= key_instantiate_and_link(keyring
, NULL
, 0, NULL
, NULL
);
553 ret
= register_key_type(&cifs_idmap_key_type
);
557 /* instruct request_key() to use this special keyring as a cache for
558 * the results it looks up */
559 set_bit(KEY_FLAG_ROOT_CAN_CLEAR
, &keyring
->flags
);
560 cred
->thread_keyring
= keyring
;
561 cred
->jit_keyring
= KEY_REQKEY_DEFL_THREAD_KEYRING
;
564 spin_lock_init(&siduidlock
);
566 spin_lock_init(&sidgidlock
);
569 spin_lock_init(&uidsidlock
);
570 siduidtree
= RB_ROOT
;
571 spin_lock_init(&gidsidlock
);
572 sidgidtree
= RB_ROOT
;
573 register_shrinker(&cifs_shrinker
);
575 cFYI(1, "cifs idmap keyring: %d", key_serial(keyring
));
586 exit_cifs_idmap(void)
588 key_revoke(root_cred
->thread_keyring
);
589 unregister_key_type(&cifs_idmap_key_type
);
591 unregister_shrinker(&cifs_shrinker
);
592 cFYI(1, "Unregistered %s key type", cifs_idmap_key_type
.name
);
596 cifs_destroy_idmaptrees(void)
598 struct rb_root
*root
;
599 struct rb_node
*node
;
602 spin_lock(&siduidlock
);
603 while ((node
= rb_first(root
)))
604 rb_erase(node
, root
);
605 spin_unlock(&siduidlock
);
608 spin_lock(&sidgidlock
);
609 while ((node
= rb_first(root
)))
610 rb_erase(node
, root
);
611 spin_unlock(&sidgidlock
);
614 spin_lock(&uidsidlock
);
615 while ((node
= rb_first(root
)))
616 rb_erase(node
, root
);
617 spin_unlock(&uidsidlock
);
620 spin_lock(&gidsidlock
);
621 while ((node
= rb_first(root
)))
622 rb_erase(node
, root
);
623 spin_unlock(&gidsidlock
);
/* Compare two SIDs (roughly equivalent to UUIDs for users or groups).
   Returns 0 when the SIDs match, otherwise a non-zero three-way
   ordering value (note: a match returns 0, not 1). */
628 int compare_sids(const struct cifs_sid
*ctsid
, const struct cifs_sid
*cwsid
)
631 int num_subauth
, num_sat
, num_saw
;
633 if ((!ctsid
) || (!cwsid
))
636 /* compare the revision */
637 if (ctsid
->revision
!= cwsid
->revision
) {
638 if (ctsid
->revision
> cwsid
->revision
)
644 /* compare all of the six auth values */
645 for (i
= 0; i
< 6; ++i
) {
646 if (ctsid
->authority
[i
] != cwsid
->authority
[i
]) {
647 if (ctsid
->authority
[i
] > cwsid
->authority
[i
])
654 /* compare all of the subauth values if any */
655 num_sat
= ctsid
->num_subauth
;
656 num_saw
= cwsid
->num_subauth
;
657 num_subauth
= num_sat
< num_saw
? num_sat
: num_saw
;
659 for (i
= 0; i
< num_subauth
; ++i
) {
660 if (ctsid
->sub_auth
[i
] != cwsid
->sub_auth
[i
]) {
661 if (le32_to_cpu(ctsid
->sub_auth
[i
]) >
662 le32_to_cpu(cwsid
->sub_auth
[i
]))
670 return 0; /* sids compare/match */
674 /* copy ntsd, owner sid, and group sid from a security descriptor to another */
675 static void copy_sec_desc(const struct cifs_ntsd
*pntsd
,
676 struct cifs_ntsd
*pnntsd
, __u32 sidsoffset
)
680 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
681 struct cifs_sid
*nowner_sid_ptr
, *ngroup_sid_ptr
;
683 /* copy security descriptor control portion */
684 pnntsd
->revision
= pntsd
->revision
;
685 pnntsd
->type
= pntsd
->type
;
686 pnntsd
->dacloffset
= cpu_to_le32(sizeof(struct cifs_ntsd
));
687 pnntsd
->sacloffset
= 0;
688 pnntsd
->osidoffset
= cpu_to_le32(sidsoffset
);
689 pnntsd
->gsidoffset
= cpu_to_le32(sidsoffset
+ sizeof(struct cifs_sid
));
692 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
693 le32_to_cpu(pntsd
->osidoffset
));
694 nowner_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+ sidsoffset
);
696 nowner_sid_ptr
->revision
= owner_sid_ptr
->revision
;
697 nowner_sid_ptr
->num_subauth
= owner_sid_ptr
->num_subauth
;
698 for (i
= 0; i
< 6; i
++)
699 nowner_sid_ptr
->authority
[i
] = owner_sid_ptr
->authority
[i
];
700 for (i
= 0; i
< 5; i
++)
701 nowner_sid_ptr
->sub_auth
[i
] = owner_sid_ptr
->sub_auth
[i
];
704 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
705 le32_to_cpu(pntsd
->gsidoffset
));
706 ngroup_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+ sidsoffset
+
707 sizeof(struct cifs_sid
));
709 ngroup_sid_ptr
->revision
= group_sid_ptr
->revision
;
710 ngroup_sid_ptr
->num_subauth
= group_sid_ptr
->num_subauth
;
711 for (i
= 0; i
< 6; i
++)
712 ngroup_sid_ptr
->authority
[i
] = group_sid_ptr
->authority
[i
];
713 for (i
= 0; i
< 5; i
++)
714 ngroup_sid_ptr
->sub_auth
[i
] = group_sid_ptr
->sub_auth
[i
];
721 change posix mode to reflect permissions
722 pmode is the existing mode (we only want to overwrite part of this
723 bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
725 static void access_flags_to_mode(__le32 ace_flags
, int type
, umode_t
*pmode
,
726 umode_t
*pbits_to_set
)
728 __u32 flags
= le32_to_cpu(ace_flags
);
729 /* the order of ACEs is important. The canonical order is to begin with
730 DENY entries followed by ALLOW, otherwise an allow entry could be
731 encountered first, making the subsequent deny entry like "dead code"
732 which would be superflous since Windows stops when a match is made
733 for the operation you are trying to perform for your user */
735 /* For deny ACEs we change the mask so that subsequent allow access
736 control entries do not turn on the bits we are denying */
737 if (type
== ACCESS_DENIED
) {
738 if (flags
& GENERIC_ALL
)
739 *pbits_to_set
&= ~S_IRWXUGO
;
741 if ((flags
& GENERIC_WRITE
) ||
742 ((flags
& FILE_WRITE_RIGHTS
) == FILE_WRITE_RIGHTS
))
743 *pbits_to_set
&= ~S_IWUGO
;
744 if ((flags
& GENERIC_READ
) ||
745 ((flags
& FILE_READ_RIGHTS
) == FILE_READ_RIGHTS
))
746 *pbits_to_set
&= ~S_IRUGO
;
747 if ((flags
& GENERIC_EXECUTE
) ||
748 ((flags
& FILE_EXEC_RIGHTS
) == FILE_EXEC_RIGHTS
))
749 *pbits_to_set
&= ~S_IXUGO
;
751 } else if (type
!= ACCESS_ALLOWED
) {
752 cERROR(1, "unknown access control type %d", type
);
755 /* else ACCESS_ALLOWED type */
757 if (flags
& GENERIC_ALL
) {
758 *pmode
|= (S_IRWXUGO
& (*pbits_to_set
));
759 cFYI(DBG2
, "all perms");
762 if ((flags
& GENERIC_WRITE
) ||
763 ((flags
& FILE_WRITE_RIGHTS
) == FILE_WRITE_RIGHTS
))
764 *pmode
|= (S_IWUGO
& (*pbits_to_set
));
765 if ((flags
& GENERIC_READ
) ||
766 ((flags
& FILE_READ_RIGHTS
) == FILE_READ_RIGHTS
))
767 *pmode
|= (S_IRUGO
& (*pbits_to_set
));
768 if ((flags
& GENERIC_EXECUTE
) ||
769 ((flags
& FILE_EXEC_RIGHTS
) == FILE_EXEC_RIGHTS
))
770 *pmode
|= (S_IXUGO
& (*pbits_to_set
));
772 cFYI(DBG2
, "access flags 0x%x mode now 0x%x", flags
, *pmode
);
777 Generate access flags to reflect permissions mode is the existing mode.
778 This function is called for every ACE in the DACL whose SID matches
779 with either owner or group or everyone.
782 static void mode_to_access_flags(umode_t mode
, umode_t bits_to_use
,
785 /* reset access mask */
788 /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
791 /* check for R/W/X UGO since we do not know whose flags
792 is this but we have cleared all the bits sans RWX for
793 either user or group or other as per bits_to_use */
795 *pace_flags
|= SET_FILE_READ_RIGHTS
;
797 *pace_flags
|= SET_FILE_WRITE_RIGHTS
;
799 *pace_flags
|= SET_FILE_EXEC_RIGHTS
;
801 cFYI(DBG2
, "mode: 0x%x, access flags now 0x%x", mode
, *pace_flags
);
805 static __u16
fill_ace_for_sid(struct cifs_ace
*pntace
,
806 const struct cifs_sid
*psid
, __u64 nmode
, umode_t bits
)
810 __u32 access_req
= 0;
812 pntace
->type
= ACCESS_ALLOWED
;
814 mode_to_access_flags(nmode
, bits
, &access_req
);
816 access_req
= SET_MINIMUM_RIGHTS
;
817 pntace
->access_req
= cpu_to_le32(access_req
);
819 pntace
->sid
.revision
= psid
->revision
;
820 pntace
->sid
.num_subauth
= psid
->num_subauth
;
821 for (i
= 0; i
< 6; i
++)
822 pntace
->sid
.authority
[i
] = psid
->authority
[i
];
823 for (i
= 0; i
< psid
->num_subauth
; i
++)
824 pntace
->sid
.sub_auth
[i
] = psid
->sub_auth
[i
];
826 size
= 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid
->num_subauth
* 4);
827 pntace
->size
= cpu_to_le16(size
);
833 #ifdef CONFIG_CIFS_DEBUG2
834 static void dump_ace(struct cifs_ace
*pace
, char *end_of_acl
)
838 /* validate that we do not go past end of acl */
840 if (le16_to_cpu(pace
->size
) < 16) {
841 cERROR(1, "ACE too small %d", le16_to_cpu(pace
->size
));
845 if (end_of_acl
< (char *)pace
+ le16_to_cpu(pace
->size
)) {
846 cERROR(1, "ACL too small to parse ACE");
850 num_subauth
= pace
->sid
.num_subauth
;
853 cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
854 pace
->sid
.revision
, pace
->sid
.num_subauth
, pace
->type
,
855 pace
->flags
, le16_to_cpu(pace
->size
));
856 for (i
= 0; i
< num_subauth
; ++i
) {
857 cFYI(1, "ACE sub_auth[%d]: 0x%x", i
,
858 le32_to_cpu(pace
->sid
.sub_auth
[i
]));
861 /* BB add length check to make sure that we do not have huge
862 num auths and therefore go off the end */
870 static void parse_dacl(struct cifs_acl
*pdacl
, char *end_of_acl
,
871 struct cifs_sid
*pownersid
, struct cifs_sid
*pgrpsid
,
872 struct cifs_fattr
*fattr
)
878 struct cifs_ace
**ppace
;
880 /* BB need to add parm so we can store the SID BB */
883 /* no DACL in the security descriptor, set
884 all the permissions for user/group/other */
885 fattr
->cf_mode
|= S_IRWXUGO
;
889 /* validate that we do not go past end of acl */
890 if (end_of_acl
< (char *)pdacl
+ le16_to_cpu(pdacl
->size
)) {
891 cERROR(1, "ACL too small to parse DACL");
895 cFYI(DBG2
, "DACL revision %d size %d num aces %d",
896 le16_to_cpu(pdacl
->revision
), le16_to_cpu(pdacl
->size
),
897 le32_to_cpu(pdacl
->num_aces
));
899 /* reset rwx permissions for user/group/other.
900 Also, if num_aces is 0 i.e. DACL has no ACEs,
901 user/group/other have no permissions */
902 fattr
->cf_mode
&= ~(S_IRWXUGO
);
904 acl_base
= (char *)pdacl
;
905 acl_size
= sizeof(struct cifs_acl
);
907 num_aces
= le32_to_cpu(pdacl
->num_aces
);
909 umode_t user_mask
= S_IRWXU
;
910 umode_t group_mask
= S_IRWXG
;
911 umode_t other_mask
= S_IRWXU
| S_IRWXG
| S_IRWXO
;
913 if (num_aces
> ULONG_MAX
/ sizeof(struct cifs_ace
*))
915 ppace
= kmalloc(num_aces
* sizeof(struct cifs_ace
*),
918 cERROR(1, "DACL memory allocation error");
922 for (i
= 0; i
< num_aces
; ++i
) {
923 ppace
[i
] = (struct cifs_ace
*) (acl_base
+ acl_size
);
924 #ifdef CONFIG_CIFS_DEBUG2
925 dump_ace(ppace
[i
], end_of_acl
);
927 if (compare_sids(&(ppace
[i
]->sid
), pownersid
) == 0)
928 access_flags_to_mode(ppace
[i
]->access_req
,
932 if (compare_sids(&(ppace
[i
]->sid
), pgrpsid
) == 0)
933 access_flags_to_mode(ppace
[i
]->access_req
,
937 if (compare_sids(&(ppace
[i
]->sid
), &sid_everyone
) == 0)
938 access_flags_to_mode(ppace
[i
]->access_req
,
942 if (compare_sids(&(ppace
[i
]->sid
), &sid_authusers
) == 0)
943 access_flags_to_mode(ppace
[i
]->access_req
,
949 /* memcpy((void *)(&(cifscred->aces[i])),
951 sizeof(struct cifs_ace)); */
953 acl_base
= (char *)ppace
[i
];
954 acl_size
= le16_to_cpu(ppace
[i
]->size
);
964 static int set_chmod_dacl(struct cifs_acl
*pndacl
, struct cifs_sid
*pownersid
,
965 struct cifs_sid
*pgrpsid
, __u64 nmode
)
968 struct cifs_acl
*pnndacl
;
970 pnndacl
= (struct cifs_acl
*)((char *)pndacl
+ sizeof(struct cifs_acl
));
972 size
+= fill_ace_for_sid((struct cifs_ace
*) ((char *)pnndacl
+ size
),
973 pownersid
, nmode
, S_IRWXU
);
974 size
+= fill_ace_for_sid((struct cifs_ace
*)((char *)pnndacl
+ size
),
975 pgrpsid
, nmode
, S_IRWXG
);
976 size
+= fill_ace_for_sid((struct cifs_ace
*)((char *)pnndacl
+ size
),
977 &sid_everyone
, nmode
, S_IRWXO
);
979 pndacl
->size
= cpu_to_le16(size
+ sizeof(struct cifs_acl
));
980 pndacl
->num_aces
= cpu_to_le32(3);
986 static int parse_sid(struct cifs_sid
*psid
, char *end_of_acl
)
988 /* BB need to add parm so we can store the SID BB */
990 /* validate that we do not go past end of ACL - sid must be at least 8
991 bytes long (assuming no sub-auths - e.g. the null SID */
992 if (end_of_acl
< (char *)psid
+ 8) {
993 cERROR(1, "ACL too small to parse SID %p", psid
);
997 if (psid
->num_subauth
) {
998 #ifdef CONFIG_CIFS_DEBUG2
1000 cFYI(1, "SID revision %d num_auth %d",
1001 psid
->revision
, psid
->num_subauth
);
1003 for (i
= 0; i
< psid
->num_subauth
; i
++) {
1004 cFYI(1, "SID sub_auth[%d]: 0x%x ", i
,
1005 le32_to_cpu(psid
->sub_auth
[i
]));
1008 /* BB add length check to make sure that we do not have huge
1009 num auths and therefore go off the end */
1011 le32_to_cpu(psid
->sub_auth
[psid
->num_subauth
-1]));
1019 /* Convert CIFS ACL to POSIX form */
1020 static int parse_sec_desc(struct cifs_sb_info
*cifs_sb
,
1021 struct cifs_ntsd
*pntsd
, int acl_len
, struct cifs_fattr
*fattr
)
1024 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
1025 struct cifs_acl
*dacl_ptr
; /* no need for SACL ptr */
1026 char *end_of_acl
= ((char *)pntsd
) + acl_len
;
1032 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1033 le32_to_cpu(pntsd
->osidoffset
));
1034 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1035 le32_to_cpu(pntsd
->gsidoffset
));
1036 dacloffset
= le32_to_cpu(pntsd
->dacloffset
);
1037 dacl_ptr
= (struct cifs_acl
*)((char *)pntsd
+ dacloffset
);
1038 cFYI(DBG2
, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1039 "sacloffset 0x%x dacloffset 0x%x",
1040 pntsd
->revision
, pntsd
->type
, le32_to_cpu(pntsd
->osidoffset
),
1041 le32_to_cpu(pntsd
->gsidoffset
),
1042 le32_to_cpu(pntsd
->sacloffset
), dacloffset
);
1043 /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1044 rc
= parse_sid(owner_sid_ptr
, end_of_acl
);
1046 cFYI(1, "%s: Error %d parsing Owner SID", __func__
, rc
);
1049 rc
= sid_to_id(cifs_sb
, owner_sid_ptr
, fattr
, SIDOWNER
);
1051 cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__
, rc
);
1055 rc
= parse_sid(group_sid_ptr
, end_of_acl
);
1057 cFYI(1, "%s: Error %d mapping Owner SID to gid", __func__
, rc
);
1060 rc
= sid_to_id(cifs_sb
, group_sid_ptr
, fattr
, SIDGROUP
);
1062 cFYI(1, "%s: Error %d mapping Group SID to gid", __func__
, rc
);
1067 parse_dacl(dacl_ptr
, end_of_acl
, owner_sid_ptr
,
1068 group_sid_ptr
, fattr
);
1070 cFYI(1, "no ACL"); /* BB grant all or default perms? */
1075 /* Convert permission bits from mode to equivalent CIFS ACL */
1076 static int build_sec_desc(struct cifs_ntsd
*pntsd
, struct cifs_ntsd
*pnntsd
,
1077 __u32 secdesclen
, __u64 nmode
, uid_t uid
, gid_t gid
, int *aclflag
)
1083 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
1084 struct cifs_sid
*nowner_sid_ptr
, *ngroup_sid_ptr
;
1085 struct cifs_acl
*dacl_ptr
= NULL
; /* no need for SACL ptr */
1086 struct cifs_acl
*ndacl_ptr
= NULL
; /* no need for SACL ptr */
1088 if (nmode
!= NO_CHANGE_64
) { /* chmod */
1089 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1090 le32_to_cpu(pntsd
->osidoffset
));
1091 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1092 le32_to_cpu(pntsd
->gsidoffset
));
1093 dacloffset
= le32_to_cpu(pntsd
->dacloffset
);
1094 dacl_ptr
= (struct cifs_acl
*)((char *)pntsd
+ dacloffset
);
1095 ndacloffset
= sizeof(struct cifs_ntsd
);
1096 ndacl_ptr
= (struct cifs_acl
*)((char *)pnntsd
+ ndacloffset
);
1097 ndacl_ptr
->revision
= dacl_ptr
->revision
;
1098 ndacl_ptr
->size
= 0;
1099 ndacl_ptr
->num_aces
= 0;
1101 rc
= set_chmod_dacl(ndacl_ptr
, owner_sid_ptr
, group_sid_ptr
,
1103 sidsoffset
= ndacloffset
+ le16_to_cpu(ndacl_ptr
->size
);
1104 /* copy sec desc control portion & owner and group sids */
1105 copy_sec_desc(pntsd
, pnntsd
, sidsoffset
);
1106 *aclflag
= CIFS_ACL_DACL
;
1108 memcpy(pnntsd
, pntsd
, secdesclen
);
1109 if (uid
!= NO_CHANGE_32
) { /* chown */
1110 owner_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+
1111 le32_to_cpu(pnntsd
->osidoffset
));
1112 nowner_sid_ptr
= kmalloc(sizeof(struct cifs_sid
),
1114 if (!nowner_sid_ptr
)
1116 rc
= id_to_sid(uid
, SIDOWNER
, nowner_sid_ptr
);
1118 cFYI(1, "%s: Mapping error %d for owner id %d",
1120 kfree(nowner_sid_ptr
);
1123 memcpy(owner_sid_ptr
, nowner_sid_ptr
,
1124 sizeof(struct cifs_sid
));
1125 kfree(nowner_sid_ptr
);
1126 *aclflag
= CIFS_ACL_OWNER
;
1128 if (gid
!= NO_CHANGE_32
) { /* chgrp */
1129 group_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+
1130 le32_to_cpu(pnntsd
->gsidoffset
));
1131 ngroup_sid_ptr
= kmalloc(sizeof(struct cifs_sid
),
1133 if (!ngroup_sid_ptr
)
1135 rc
= id_to_sid(gid
, SIDGROUP
, ngroup_sid_ptr
);
1137 cFYI(1, "%s: Mapping error %d for group id %d",
1139 kfree(ngroup_sid_ptr
);
1142 memcpy(group_sid_ptr
, ngroup_sid_ptr
,
1143 sizeof(struct cifs_sid
));
1144 kfree(ngroup_sid_ptr
);
1145 *aclflag
= CIFS_ACL_GROUP
;
1152 static struct cifs_ntsd
*get_cifs_acl_by_fid(struct cifs_sb_info
*cifs_sb
,
1153 __u16 fid
, u32
*pacllen
)
1155 struct cifs_ntsd
*pntsd
= NULL
;
1158 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1161 return ERR_CAST(tlink
);
1164 rc
= CIFSSMBGetCIFSACL(xid
, tlink_tcon(tlink
), fid
, &pntsd
, pacllen
);
1167 cifs_put_tlink(tlink
);
1169 cFYI(1, "%s: rc = %d ACL len %d", __func__
, rc
, *pacllen
);
1175 static struct cifs_ntsd
*get_cifs_acl_by_path(struct cifs_sb_info
*cifs_sb
,
1176 const char *path
, u32
*pacllen
)
1178 struct cifs_ntsd
*pntsd
= NULL
;
1181 int rc
, create_options
= 0;
1183 struct cifs_tcon
*tcon
;
1184 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1187 return ERR_CAST(tlink
);
1189 tcon
= tlink_tcon(tlink
);
1192 if (backup_cred(cifs_sb
))
1193 create_options
|= CREATE_OPEN_BACKUP_INTENT
;
1195 rc
= CIFSSMBOpen(xid
, tcon
, path
, FILE_OPEN
, READ_CONTROL
,
1196 create_options
, &fid
, &oplock
, NULL
, cifs_sb
->local_nls
,
1197 cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR
);
1199 rc
= CIFSSMBGetCIFSACL(xid
, tcon
, fid
, &pntsd
, pacllen
);
1200 CIFSSMBClose(xid
, tcon
, fid
);
1203 cifs_put_tlink(tlink
);
1206 cFYI(1, "%s: rc = %d ACL len %d", __func__
, rc
, *pacllen
);
1212 /* Retrieve an ACL from the server */
1213 struct cifs_ntsd
*get_cifs_acl(struct cifs_sb_info
*cifs_sb
,
1214 struct inode
*inode
, const char *path
,
1217 struct cifs_ntsd
*pntsd
= NULL
;
1218 struct cifsFileInfo
*open_file
= NULL
;
1221 open_file
= find_readable_file(CIFS_I(inode
), true);
1223 return get_cifs_acl_by_path(cifs_sb
, path
, pacllen
);
1225 pntsd
= get_cifs_acl_by_fid(cifs_sb
, open_file
->fid
.netfid
, pacllen
);
1226 cifsFileInfo_put(open_file
);
1230 /* Set an ACL on the server */
1231 int set_cifs_acl(struct cifs_ntsd
*pnntsd
, __u32 acllen
,
1232 struct inode
*inode
, const char *path
, int aclflag
)
1236 int rc
, access_flags
, create_options
= 0;
1238 struct cifs_tcon
*tcon
;
1239 struct cifs_sb_info
*cifs_sb
= CIFS_SB(inode
->i_sb
);
1240 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1243 return PTR_ERR(tlink
);
1245 tcon
= tlink_tcon(tlink
);
1248 if (backup_cred(cifs_sb
))
1249 create_options
|= CREATE_OPEN_BACKUP_INTENT
;
1251 if (aclflag
== CIFS_ACL_OWNER
|| aclflag
== CIFS_ACL_GROUP
)
1252 access_flags
= WRITE_OWNER
;
1254 access_flags
= WRITE_DAC
;
1256 rc
= CIFSSMBOpen(xid
, tcon
, path
, FILE_OPEN
, access_flags
,
1257 create_options
, &fid
, &oplock
, NULL
, cifs_sb
->local_nls
,
1258 cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR
);
1260 cERROR(1, "Unable to open file to set ACL");
1264 rc
= CIFSSMBSetCIFSACL(xid
, tcon
, fid
, pnntsd
, acllen
, aclflag
);
1265 cFYI(DBG2
, "SetCIFSACL rc = %d", rc
);
1267 CIFSSMBClose(xid
, tcon
, fid
);
1270 cifs_put_tlink(tlink
);
1274 /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
1276 cifs_acl_to_fattr(struct cifs_sb_info
*cifs_sb
, struct cifs_fattr
*fattr
,
1277 struct inode
*inode
, const char *path
, const __u16
*pfid
)
1279 struct cifs_ntsd
*pntsd
= NULL
;
1283 cFYI(DBG2
, "converting ACL to mode for %s", path
);
1286 pntsd
= get_cifs_acl_by_fid(cifs_sb
, *pfid
, &acllen
);
1288 pntsd
= get_cifs_acl(cifs_sb
, inode
, path
, &acllen
);
1290 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1291 if (IS_ERR(pntsd
)) {
1292 rc
= PTR_ERR(pntsd
);
1293 cERROR(1, "%s: error %d getting sec desc", __func__
, rc
);
1295 rc
= parse_sec_desc(cifs_sb
, pntsd
, acllen
, fattr
);
1298 cERROR(1, "parse sec desc failed rc = %d", rc
);
1304 /* Convert mode bits to an ACL so we can update the ACL on the server */
1306 id_mode_to_cifs_acl(struct inode
*inode
, const char *path
, __u64 nmode
,
1307 uid_t uid
, gid_t gid
)
1310 int aclflag
= CIFS_ACL_DACL
; /* default flag to set */
1311 __u32 secdesclen
= 0;
1312 struct cifs_ntsd
*pntsd
= NULL
; /* acl obtained from server */
1313 struct cifs_ntsd
*pnntsd
= NULL
; /* modified acl to be sent to server */
1315 cFYI(DBG2
, "set ACL from mode for %s", path
);
1317 /* Get the security descriptor */
1318 pntsd
= get_cifs_acl(CIFS_SB(inode
->i_sb
), inode
, path
, &secdesclen
);
1320 /* Add three ACEs for owner, group, everyone getting rid of
1321 other ACEs as chmod disables ACEs and set the security descriptor */
1323 if (IS_ERR(pntsd
)) {
1324 rc
= PTR_ERR(pntsd
);
1325 cERROR(1, "%s: error %d getting sec desc", __func__
, rc
);
1327 /* allocate memory for the smb header,
1328 set security descriptor request security descriptor
1329 parameters, and secuirty descriptor itself */
1331 secdesclen
= secdesclen
< DEFSECDESCLEN
?
1332 DEFSECDESCLEN
: secdesclen
;
1333 pnntsd
= kmalloc(secdesclen
, GFP_KERNEL
);
1335 cERROR(1, "Unable to allocate security descriptor");
1340 rc
= build_sec_desc(pntsd
, pnntsd
, secdesclen
, nmode
, uid
, gid
,
1343 cFYI(DBG2
, "build_sec_desc rc: %d", rc
);
1346 /* Set the security descriptor */
1347 rc
= set_cifs_acl(pnntsd
, secdesclen
, inode
,
1349 cFYI(DBG2
, "set_cifs_acl rc: %d", rc
);