[CIFS] Follow on to cifsacl endian patch (__constant_cpu_to_le32 was required)
/*
 *   fs/cifs/cifsacl.c
 *
 *   Copyright (C) International Business Machines Corp., 2007,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Contains the routines for mapping CIFS/NTFS ACLs
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "cifs_debug.h"
/* security id for everyone/world system group */
static const struct cifs_sid sid_everyone = {
	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
	1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* group users */
static const struct cifs_sid sid_user = {1, 2, {0, 0, 0, 0, 0, 5}, {} };

const struct cred *root_cred;
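
/*
 * Walk one of the SID-to-id rb-trees, erasing entries whose mapping has
 * expired (SID_MAP_EXPIRE) and is no longer referenced; everything else is
 * counted as remaining.  At most nr_to_scan entries are deleted, and a
 * nr_to_scan of zero only counts the nodes.
 */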
static void
shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
			int *nr_del)
{
	struct rb_node *node;
	struct rb_node *tmp;
	struct cifs_sid_id *psidid;

	node = rb_first(root);
	while (node) {
		tmp = node;
		node = rb_next(tmp);
		psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
		if (nr_to_scan == 0 || *nr_del == nr_to_scan)
			++(*nr_rem);
		else {
			if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
						&& psidid->refcount == 0) {
				rb_erase(tmp, root);
				++(*nr_del);
			} else
				++(*nr_rem);
		}
	}
}

/*
 * Run idmap cache shrinker.
 */
static int
cifs_idmap_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	int nr_del = 0;
	int nr_rem = 0;
	struct rb_root *root;

	root = &uidtree;
	spin_lock(&siduidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&sidgidlock);

	return nr_rem;
}

static struct shrinker cifs_shrinker = {
	.shrink = cifs_idmap_shrinker,
	.seeks = DEFAULT_SEEKS,
};

static int
cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
{
	char *payload;

	payload = kmalloc(datalen, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	memcpy(payload, data, datalen);
	key->payload.data = payload;
	return 0;
}

static inline void
cifs_idmap_key_destroy(struct key *key)
{
	kfree(key->payload.data);
}

struct key_type cifs_idmap_key_type = {
	.name        = "cifs.idmap",
	.instantiate = cifs_idmap_key_instantiate,
	.destroy     = cifs_idmap_key_destroy,
	.describe    = user_describe,
	.match       = user_match,
};
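
/*
 * Render a SID into textual "S-R-A-S1-S2-..." form; for example, the
 * sid_authusers value defined above (revision 1, authority value 5,
 * sub-authority 11) is written as "S-1-5-11".
 */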
static void
sid_to_str(struct cifs_sid *sidptr, char *sidstr)
{
	int i;
	unsigned long saval;
	char *strptr;

	strptr = sidstr;

	sprintf(strptr, "%s", "S");
	strptr = sidstr + strlen(sidstr);

	sprintf(strptr, "-%d", sidptr->revision);
	strptr = sidstr + strlen(sidstr);

	for (i = 0; i < 6; ++i) {
		if (sidptr->authority[i]) {
			sprintf(strptr, "-%d", sidptr->authority[i]);
			strptr = sidstr + strlen(sidstr);
		}
	}

	for (i = 0; i < sidptr->num_subauth; ++i) {
		saval = le32_to_cpu(sidptr->sub_auth[i]);
		sprintf(strptr, "-%ld", saval);
		strptr = sidstr + strlen(sidstr);
	}
}
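
/*
 * Link a new SID-to-id node into the rb-tree at the position found by
 * walking the tree with compare_sids().  The node is stamped as already
 * expired (jiffies - SID_MAP_RETRY - 1) and unmapped so that the first
 * lookup triggers an upcall, and its sidstr is prefixed with the
 * caller-supplied typestr ("os:" or "gs:").
 */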
static void
id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
		struct cifs_sid_id **psidid, char *typestr)
{
	int rc;
	char *strptr;
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		parent = node;
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			linkto = &(node->rb_left);
			node = node->rb_left;
		} else if (rc < 0) {
			linkto = &(node->rb_right);
			node = node->rb_right;
		}
	}

	memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sid_to_str(&(*psidid)->sid, strptr);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
}
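
/*
 * Look up a SID in the rb-tree; returns the matching cifs_sid_id node, or
 * NULL if no node compares equal.  Callers hold the tree's spinlock.
 */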
static struct cifs_sid_id *
id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
{
	int rc;
	struct rb_node *node = root->rb_node;
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			node = node->rb_left;
		} else if (rc < 0) {
			node = node->rb_right;
		} else /* node found */
			return lsidid;
	}

	return NULL;
}

static int
sidid_pending_wait(void *unused)
{
	schedule();
	return signal_pending(current) ? -ERESTARTSYS : 0;
}
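
/*
 * Map an owner or group SID to a POSIX uid or gid and store it in fattr.
 * Mappings are cached in the uidtree/gidtree rb-trees; on a miss a
 * "cifs.idmap" key upcall is issued (retried at most once per SID_MAP_RETRY
 * interval) with the SID in string form.  If the upcall fails, the mount's
 * default uid/gid is used.
 */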
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case; the shrinker will not erase
	 * this node until all references are put, and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount; /* decremented without spinlock */
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
	}

sid_to_id_out:
	--psidid->refcount; /* decremented without spinlock */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;

	return 0;
}
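
/*
 * Register the "cifs.idmap" key type and set up the credentials used for
 * SID-to-id upcalls: request_key() results are cached in a dedicated
 * ".cifs_idmap" keyring owned by a kernel credential (root_cred), so that
 * add_key() from userspace cannot inject bogus mappings.  Also initializes
 * the uid/gid rb-trees and registers the idmap cache shrinker.
 */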
int
init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
	if (ret < 0)
		goto failed_put_key;

	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	root_cred = cred;

	spin_lock_init(&siduidlock);
	uidtree = RB_ROOT;
	spin_lock_init(&sidgidlock);
	gidtree = RB_ROOT;

	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}

void
exit_cifs_idmap(void)
{
	key_revoke(root_cred->thread_keyring);
	unregister_key_type(&cifs_idmap_key_type);
	put_cred(root_cred);
	unregister_shrinker(&cifs_shrinker);
	cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
}
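
/*
 * Empty both SID-to-id caches, erasing every node from the uid and gid
 * rb-trees under their respective locks.
 */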
void
cifs_destroy_idmaptrees(void)
{
	struct rb_root *root;
	struct rb_node *node;

	root = &uidtree;
	spin_lock(&siduidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&sidgidlock);
}

/* Compare two SIDs (roughly equivalent to a UUID for a user or group).
   Returns 0 if the SIDs match; otherwise returns 1 or -1 (memcmp-style),
   depending on which SID compares greater. */
int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
{
	int i;
	int num_subauth, num_sat, num_saw;

	if ((!ctsid) || (!cwsid))
		return 1;

	/* compare the revision */
	if (ctsid->revision != cwsid->revision) {
		if (ctsid->revision > cwsid->revision)
			return 1;
		else
			return -1;
	}

	/* compare all of the six auth values */
	for (i = 0; i < 6; ++i) {
		if (ctsid->authority[i] != cwsid->authority[i]) {
			if (ctsid->authority[i] > cwsid->authority[i])
				return 1;
			else
				return -1;
		}
	}

	/* compare all of the subauth values if any */
	num_sat = ctsid->num_subauth;
	num_saw = cwsid->num_subauth;
	num_subauth = num_sat < num_saw ? num_sat : num_saw;
	if (num_subauth) {
		for (i = 0; i < num_subauth; ++i) {
			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
				if (le32_to_cpu(ctsid->sub_auth[i]) >
					le32_to_cpu(cwsid->sub_auth[i]))
					return 1;
				else
					return -1;
			}
		}
	}

	return 0; /* sids compare/match */
}

/* copy ntsd, owner sid, and group sid from a security descriptor to another */
static void copy_sec_desc(const struct cifs_ntsd *pntsd,
				struct cifs_ntsd *pnntsd, __u32 sidsoffset)
{
	int i;

	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;

	/* copy security descriptor control portion */
	pnntsd->revision = pntsd->revision;
	pnntsd->type = pntsd->type;
	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
	pnntsd->sacloffset = 0;
	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));

	/* copy owner sid */
	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);

	nowner_sid_ptr->revision = owner_sid_ptr->revision;
	nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
	for (i = 0; i < 6; i++)
		nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
	for (i = 0; i < 5; i++)
		nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];

	/* copy group sid */
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
					sizeof(struct cifs_sid));

	ngroup_sid_ptr->revision = group_sid_ptr->revision;
	ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
	for (i = 0; i < 6; i++)
		ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
	for (i = 0; i < 5; i++)
		ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];

	return;
}

/*
   Change posix mode to reflect permissions.
   pmode is the existing mode (we only want to overwrite part of it).
   bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
*/
static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
				 umode_t *pbits_to_set)
{
	__u32 flags = le32_to_cpu(ace_flags);
	/* the order of ACEs is important.  The canonical order is to begin with
	   DENY entries followed by ALLOW, otherwise an allow entry could be
	   encountered first, making the subsequent deny entry like "dead code"
	   which would be superfluous since Windows stops when a match is made
	   for the operation you are trying to perform for your user */

	/* For deny ACEs we change the mask so that subsequent allow access
	   control entries do not turn on the bits we are denying */
	if (type == ACCESS_DENIED) {
		if (flags & GENERIC_ALL)
			*pbits_to_set &= ~S_IRWXUGO;

		if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
			*pbits_to_set &= ~S_IWUGO;
		if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
			*pbits_to_set &= ~S_IRUGO;
		if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
			*pbits_to_set &= ~S_IXUGO;
		return;
	} else if (type != ACCESS_ALLOWED) {
		cERROR(1, "unknown access control type %d", type);
		return;
	}
	/* else ACCESS_ALLOWED type */

	if (flags & GENERIC_ALL) {
		*pmode |= (S_IRWXUGO & (*pbits_to_set));
		cFYI(DBG2, "all perms");
		return;
	}
	if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
		*pmode |= (S_IWUGO & (*pbits_to_set));
	if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
		*pmode |= (S_IRUGO & (*pbits_to_set));
	if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
		*pmode |= (S_IXUGO & (*pbits_to_set));

	cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
	return;
}

/*
   Generate access flags to reflect permissions; mode is the existing mode.
   This function is called for every ACE in the DACL whose SID matches
   with either owner or group or everyone.
*/

static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
				__u32 *pace_flags)
{
	/* reset access mask */
	*pace_flags = 0x0;

	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
	mode &= bits_to_use;

	/* check for R/W/X UGO since we do not know whose bits these are,
	   but we have cleared all the bits sans RWX for either user or
	   group or other as per bits_to_use */
	if (mode & S_IRUGO)
		*pace_flags |= SET_FILE_READ_RIGHTS;
	if (mode & S_IWUGO)
		*pace_flags |= SET_FILE_WRITE_RIGHTS;
	if (mode & S_IXUGO)
		*pace_flags |= SET_FILE_EXEC_RIGHTS;

	cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
	return;
}
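
/*
 * Fill one ACCESS_ALLOWED ACE for the given SID, translating the relevant
 * mode bits (S_IRWXU, S_IRWXG or S_IRWXO) into NT access rights.  The
 * returned size is the on-the-wire ACE length: 8 bytes of ACE header plus
 * access mask, 8 bytes of SID revision/count/authority, and 4 bytes per
 * sub-authority.
 */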
static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
			const struct cifs_sid *psid, __u64 nmode, umode_t bits)
{
	int i;
	__u16 size = 0;
	__u32 access_req = 0;

	pntace->type = ACCESS_ALLOWED;
	pntace->flags = 0x0;
	mode_to_access_flags(nmode, bits, &access_req);
	if (!access_req)
		access_req = SET_MINIMUM_RIGHTS;
	pntace->access_req = cpu_to_le32(access_req);

	pntace->sid.revision = psid->revision;
	pntace->sid.num_subauth = psid->num_subauth;
	for (i = 0; i < 6; i++)
		pntace->sid.authority[i] = psid->authority[i];
	for (i = 0; i < psid->num_subauth; i++)
		pntace->sid.sub_auth[i] = psid->sub_auth[i];

	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
	pntace->size = cpu_to_le16(size);

	return size;
}

#ifdef CONFIG_CIFS_DEBUG2
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
	int num_subauth;

	/* validate that we do not go past end of acl */

	if (le16_to_cpu(pace->size) < 16) {
		cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
		return;
	}

	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cERROR(1, "ACL too small to parse ACE");
		return;
	}

	num_subauth = pace->sid.num_subauth;
	if (num_subauth) {
		int i;
		cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
			pace->sid.revision, pace->sid.num_subauth, pace->type,
			pace->flags, le16_to_cpu(pace->size));
		for (i = 0; i < num_subauth; ++i) {
			cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
				le32_to_cpu(pace->sid.sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
	}

	return;
}
#endif
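
/*
 * Walk the ACEs of a DACL and fold each entry whose SID matches the owner,
 * group, everyone, or authenticated-users SID into the POSIX mode bits in
 * fattr.  A missing DACL grants full permissions; an empty one grants none.
 */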
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
		       struct cifs_fattr *fattr)
{
	int i;
	int num_aces = 0;
	int acl_size;
	char *acl_base;
	struct cifs_ace **ppace;

	/* BB need to add parm so we can store the SID BB */

	if (!pdacl) {
		/* no DACL in the security descriptor, set
		   all the permissions for user/group/other */
		fattr->cf_mode |= S_IRWXUGO;
		return;
	}

	/* validate that we do not go past end of acl */
	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
		cERROR(1, "ACL too small to parse DACL");
		return;
	}

	cFYI(DBG2, "DACL revision %d size %d num aces %d",
		le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
		le32_to_cpu(pdacl->num_aces));

	/* reset rwx permissions for user/group/other.
	   Also, if num_aces is 0 i.e. DACL has no ACEs,
	   user/group/other have no permissions */
	fattr->cf_mode &= ~(S_IRWXUGO);

	acl_base = (char *)pdacl;
	acl_size = sizeof(struct cifs_acl);

	num_aces = le32_to_cpu(pdacl->num_aces);
	if (num_aces > 0) {
		umode_t user_mask = S_IRWXU;
		umode_t group_mask = S_IRWXG;
		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
				GFP_KERNEL);
		if (!ppace) {
			cERROR(1, "DACL memory allocation error");
			return;
		}

		for (i = 0; i < num_aces; ++i) {
			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
			dump_ace(ppace[i], end_of_acl);
#endif
			if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &user_mask);
			if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &group_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);

/*			memcpy((void *)(&(cifscred->aces[i])),
				(void *)ppace[i],
				sizeof(struct cifs_ace)); */

			acl_base = (char *)ppace[i];
			acl_size = le16_to_cpu(ppace[i]->size);
		}

		kfree(ppace);
	}

	return;
}
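
/*
 * Build a fresh DACL containing exactly three ACCESS_ALLOWED ACEs (owner,
 * group, everyone) derived from the requested mode, and update the DACL's
 * size and ACE count accordingly.
 */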
static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
			struct cifs_sid *pgrpsid, __u64 nmode)
{
	u16 size = 0;
	struct cifs_acl *pnndacl;

	pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));

	size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
					pownersid, nmode, S_IRWXU);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
					pgrpsid, nmode, S_IRWXG);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
					&sid_everyone, nmode, S_IRWXO);

	pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
	pndacl->num_aces = cpu_to_le32(3);

	return 0;
}

static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
{
	/* BB need to add parm so we can store the SID BB */

	/* validate that we do not go past end of ACL - sid must be at least 8
	   bytes long (assuming no sub-auths - e.g. the null SID) */
	if (end_of_acl < (char *)psid + 8) {
		cERROR(1, "ACL too small to parse SID %p", psid);
		return -EINVAL;
	}

	if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
		int i;
		cFYI(1, "SID revision %d num_auth %d",
			psid->revision, psid->num_subauth);

		for (i = 0; i < psid->num_subauth; i++) {
			cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
				le32_to_cpu(psid->sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
		cFYI(1, "RID 0x%x",
			le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
#endif
	}

	return 0;
}

/* Convert CIFS ACL to POSIX form */
static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
		struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
{
	int rc = 0;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
	char *end_of_acl = ((char *)pntsd) + acl_len;
	__u32 dacloffset;

	if (pntsd == NULL)
		return -EIO;

	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	dacloffset = le32_to_cpu(pntsd->dacloffset);
	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
	cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
		 "sacloffset 0x%x dacloffset 0x%x",
		 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
		 le32_to_cpu(pntsd->gsidoffset),
		 le32_to_cpu(pntsd->sacloffset), dacloffset);
	/* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
	rc = parse_sid(owner_sid_ptr, end_of_acl);
	if (rc) {
		cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
	if (rc) {
		cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
		return rc;
	}

	rc = parse_sid(group_sid_ptr, end_of_acl);
	if (rc) {
		cFYI(1, "%s: Error %d parsing Group SID", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
	if (rc) {
		cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
		return rc;
	}

	if (dacloffset)
		parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
			   group_sid_ptr, fattr);
	else
		cFYI(1, "no ACL"); /* BB grant all or default perms? */

/*	cifscred->uid = owner_sid_ptr->rid;
	cifscred->gid = group_sid_ptr->rid;
	memcpy((void *)(&(cifscred->osid)), (void *)owner_sid_ptr,
			sizeof(struct cifs_sid));
	memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
			sizeof(struct cifs_sid)); */

	return rc;
}

/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
				struct inode *inode, __u64 nmode)
{
	int rc = 0;
	__u32 dacloffset;
	__u32 ndacloffset;
	__u32 sidsoffset;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */

	if ((inode == NULL) || (pntsd == NULL) || (pnntsd == NULL))
		return -EIO;

	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));

	dacloffset = le32_to_cpu(pntsd->dacloffset);
	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);

	ndacloffset = sizeof(struct cifs_ntsd);
	ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
	ndacl_ptr->revision = dacl_ptr->revision;
	ndacl_ptr->size = 0;
	ndacl_ptr->num_aces = 0;

	rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr, nmode);

	sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);

	/* copy security descriptor control portion and owner and group sid */
	copy_sec_desc(pntsd, pnntsd, sidsoffset);

	return rc;
}
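
/*
 * Fetch the security descriptor for an already-open file handle (fid).
 * On success the caller owns the returned buffer and must kfree() it;
 * on failure an ERR_PTR is returned.
 */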
static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
		__u16 fid, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int xid, rc;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	xid = GetXid();
	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
	FreeXid(xid);

	cifs_put_tlink(tlink);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}

static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int oplock = 0;
	int xid, rc;
	__u16 fid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
			 &fid, &oplock, NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (!rc) {
		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
		CIFSSMBClose(xid, tcon, fid);
	}

	cifs_put_tlink(tlink);
	FreeXid(xid);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}

/* Retrieve an ACL from the server */
struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
				      struct inode *inode, const char *path,
				      u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	struct cifsFileInfo *open_file = NULL;

	if (inode)
		open_file = find_readable_file(CIFS_I(inode), true);
	if (!open_file)
		return get_cifs_acl_by_path(cifs_sb, path, pacllen);

	pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
	cifsFileInfo_put(open_file);
	return pntsd;
}

static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
		struct cifs_ntsd *pnntsd, u32 acllen)
{
	int xid, rc;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	xid = GetXid();
	rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen);
	FreeXid(xid);
	cifs_put_tlink(tlink);

	cFYI(DBG2, "SetCIFSACL rc = %d", rc);
	return rc;
}
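
/*
 * As above, but for the case where no open file handle is available: open
 * the file by path with WRITE_DAC access, push the new security descriptor,
 * then close it again.
 */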
static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
		struct cifs_ntsd *pnntsd, u32 acllen)
{
	int oplock = 0;
	int xid, rc;
	__u16 fid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0,
			 &fid, &oplock, NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		cERROR(1, "Unable to open file to set ACL");
		goto out;
	}

	rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen);
	cFYI(DBG2, "SetCIFSACL rc = %d", rc);

	CIFSSMBClose(xid, tcon, fid);
out:
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Set an ACL on the server */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
			struct inode *inode, const char *path)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *open_file;
	int rc;

	cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);

	open_file = find_readable_file(CIFS_I(inode), true);
	if (!open_file)
		return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);

	rc = set_cifs_acl_by_fid(cifs_sb, open_file->netfid, pnntsd, acllen);
	cifsFileInfo_put(open_file);
	return rc;
}

/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
		  struct inode *inode, const char *path, const __u16 *pfid)
{
	struct cifs_ntsd *pntsd = NULL;
	u32 acllen = 0;
	int rc = 0;

	cFYI(DBG2, "converting ACL to mode for %s", path);

	if (pfid)
		pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
	else
		pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);

	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
		kfree(pntsd);
		if (rc)
			cERROR(1, "parse sec desc failed rc = %d", rc);
	}

	return rc;
}

/* Convert mode bits to an ACL so we can update the ACL on the server */
int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
{
	int rc = 0;
	__u32 secdesclen = 0;
	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */

	cFYI(DBG2, "set ACL from mode for %s", path);

	/* Get the security descriptor */
	pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);

	/* Add three ACEs for owner, group, everyone getting rid of
	   other ACEs as chmod disables ACEs and set the security descriptor */

	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		/* allocate memory for the smb header,
		   set security descriptor request security descriptor
		   parameters, and security descriptor itself */

		secdesclen = secdesclen < DEFSECDESCLEN ?
					DEFSECDESCLEN : secdesclen;
		pnntsd = kmalloc(secdesclen, GFP_KERNEL);
		if (!pnntsd) {
			cERROR(1, "Unable to allocate security descriptor");
			kfree(pntsd);
			return -ENOMEM;
		}

		rc = build_sec_desc(pntsd, pnntsd, inode, nmode);

		cFYI(DBG2, "build_sec_desc rc: %d", rc);

		if (!rc) {
			/* Set the security descriptor */
			rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
			cFYI(DBG2, "set_cifs_acl rc: %d", rc);
		}

		kfree(pnntsd);
		kfree(pntsd);
	}

	return rc;
}