/*
 * fs/cifs/cifsacl.c
 *
 * Copyright (C) International Business Machines Corp., 2007,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * Contains the routines for mapping CIFS/NTFS ACLs
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "cifs_debug.h"
/* security id for everyone/world system group */
static const struct cifs_sid sid_everyone = {
	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
	1, 1, {0, 0, 0, 0, 0, 5}, {11} };
/* group users */
static const struct cifs_sid sid_user = {1, 2, {0, 0, 0, 0, 0, 5}, {} };
const struct cred *root_cred;
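
/*
 * Walk a SID-to-id rbtree and erase entries whose mapping has expired and
 * is no longer referenced; the caller passes counters that are updated
 * with how many entries remain and how many were deleted.
 */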
static void
shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
			int *nr_del)
{
	struct rb_node *node;
	struct rb_node *tmp;
	struct cifs_sid_id *psidid;

	node = rb_first(root);
	while (node) {
		tmp = node;
		node = rb_next(tmp);
		psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
		if (nr_to_scan == 0 || *nr_del == nr_to_scan)
			++(*nr_rem);
		else {
			if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
						&& psidid->refcount == 0) {
				rb_erase(tmp, root);
				++(*nr_del);
			} else
				++(*nr_rem);
		}
	}
}
/*
 * Run idmap cache shrinker.
 */
static int
cifs_idmap_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	int nr_del = 0;
	int nr_rem = 0;
	struct rb_root *root;

	root = &uidtree;
	spin_lock(&siduidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&sidgidlock);

	return nr_rem;
}
static struct shrinker cifs_shrinker = {
	.shrink = cifs_idmap_shrinker,
	.seeks = DEFAULT_SEEKS,
};
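
/*
 * Instantiate a cifs.idmap key with the payload handed back by the
 * userspace upcall; the payload carries the id that the SID string
 * resolved to.
 */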
static int
cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
{
	char *payload;

	payload = kmalloc(datalen, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	memcpy(payload, data, datalen);
	key->payload.data = payload;
	return 0;
}
static inline void
cifs_idmap_key_destroy(struct key *key)
{
	kfree(key->payload.data);
}
struct key_type cifs_idmap_key_type = {
	.name = "cifs.idmap",
	.instantiate = cifs_idmap_key_instantiate,
	.destroy = cifs_idmap_key_destroy,
	.describe = user_describe,
	.match = user_match,
};
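
/*
 * Convert a cifs_sid to its "S-<revision>-<authority>-<subauth>..." string
 * form; prefixed with "os:" or "gs:", this string is used as the key
 * description for the cifs.idmap upcall.
 */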
static void
sid_to_str(struct cifs_sid *sidptr, char *sidstr)
{
	int i;
	unsigned long saval;
	char *strptr;

	strptr = sidstr;

	sprintf(strptr, "%s", "S");
	strptr = sidstr + strlen(sidstr);

	sprintf(strptr, "-%d", sidptr->revision);
	strptr = sidstr + strlen(sidstr);

	for (i = 0; i < 6; ++i) {
		if (sidptr->authority[i]) {
			sprintf(strptr, "-%d", sidptr->authority[i]);
			strptr = sidstr + strlen(sidstr);
		}
	}

	for (i = 0; i < sidptr->num_subauth; ++i) {
		saval = le32_to_cpu(sidptr->sub_auth[i]);
		sprintf(strptr, "-%ld", saval);
		strptr = sidstr + strlen(sidstr);
	}
}
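
/*
 * Insert a new SID-to-id node into the rbtree, ordered by compare_sids().
 * The node starts out unmapped, with a timestamp that allows an immediate
 * upcall attempt (jiffies - (SID_MAP_RETRY + 1)).
 */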
static void
id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
		struct cifs_sid_id **psidid, char *typestr)
{
	int rc;
	char *strptr;
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		parent = node;
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			linkto = &(node->rb_left);
			node = node->rb_left;
		} else if (rc < 0) {
			linkto = &(node->rb_right);
			node = node->rb_right;
		}
	}

	memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sid_to_str(&(*psidid)->sid, strptr);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
}
static struct cifs_sid_id *
id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
{
	int rc;
	struct rb_node *node = root->rb_node;
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			node = node->rb_left;
		} else if (rc < 0) {
			node = node->rb_right;
		} else /* node found */
			return lsidid;
	}

	return NULL;
}
static int
sidid_pending_wait(void *unused)
{
	schedule();
	return signal_pending(current) ? -ERESTARTSYS : 0;
}
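
/*
 * Map a SID to a POSIX uid or gid for the given fattr.  The mapping is
 * cached in the uid/gid rbtrees; on a miss (or once SID_MAP_RETRY has
 * elapsed) a request_key() upcall is issued under root_cred, and if no
 * mapping can be obtained the mount's default uid/gid is used.
 */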
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount; /* decremented without spinlock */
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
	}

sid_to_id_out:
	--psidid->refcount; /* decremented without spinlock */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;

	return 0;
}
int
init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
	if (ret < 0)
		goto failed_put_key;

	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	root_cred = cred;

	spin_lock_init(&siduidlock);
	uidtree = RB_ROOT;
	spin_lock_init(&sidgidlock);
	gidtree = RB_ROOT;

	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
void
exit_cifs_idmap(void)
{
	key_revoke(root_cred->thread_keyring);
	unregister_key_type(&cifs_idmap_key_type);
	put_cred(root_cred);
	unregister_shrinker(&cifs_shrinker);
	cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
}
void
cifs_destroy_idmaptrees(void)
{
	struct rb_root *root;
	struct rb_node *node;

	root = &uidtree;
	spin_lock(&siduidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&sidgidlock);
}
/* Compare the two SIDs (roughly equivalent to a UUID for a user or group):
   returns zero if they match and a positive or negative value (comparator
   style) if they do not */
int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
{
	int i;
	int num_subauth, num_sat, num_saw;

	if ((!ctsid) || (!cwsid))
		return 1;

	/* compare the revision */
	if (ctsid->revision != cwsid->revision) {
		if (ctsid->revision > cwsid->revision)
			return 1;
		else
			return -1;
	}

	/* compare all of the six auth values */
	for (i = 0; i < 6; ++i) {
		if (ctsid->authority[i] != cwsid->authority[i]) {
			if (ctsid->authority[i] > cwsid->authority[i])
				return 1;
			else
				return -1;
		}
	}

	/* compare all of the subauth values if any */
	num_sat = ctsid->num_subauth;
	num_saw = cwsid->num_subauth;
	num_subauth = num_sat < num_saw ? num_sat : num_saw;
	if (num_subauth) {
		for (i = 0; i < num_subauth; ++i) {
			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
				if (ctsid->sub_auth[i] > cwsid->sub_auth[i])
					return 1;
				else
					return -1;
			}
		}
	}

	return 0; /* sids compare/match */
}
/* copy ntsd, owner sid, and group sid from a security descriptor to another */
static void copy_sec_desc(const struct cifs_ntsd *pntsd,
				struct cifs_ntsd *pnntsd, __u32 sidsoffset)
{
	int i;

	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;

	/* copy security descriptor control portion */
	pnntsd->revision = pntsd->revision;
	pnntsd->type = pntsd->type;
	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
	pnntsd->sacloffset = 0;
	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));

	/* copy owner sid */
	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);

	nowner_sid_ptr->revision = owner_sid_ptr->revision;
	nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
	for (i = 0; i < 6; i++)
		nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
	for (i = 0; i < 5; i++)
		nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];

	/* copy group sid */
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
					sizeof(struct cifs_sid));

	ngroup_sid_ptr->revision = group_sid_ptr->revision;
	ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
	for (i = 0; i < 6; i++)
		ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
	for (i = 0; i < 5; i++)
		ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];

	return;
}
/*
   Change posix mode to reflect permissions.
   pmode is the existing mode (we only want to overwrite part of this).
   bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
*/
static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
				 umode_t *pbits_to_set)
{
	__u32 flags = le32_to_cpu(ace_flags);
	/* the order of ACEs is important.  The canonical order is to begin with
	   DENY entries followed by ALLOW, otherwise an allow entry could be
	   encountered first, making the subsequent deny entry like "dead code"
	   which would be superfluous since Windows stops when a match is made
	   for the operation you are trying to perform for your user */

	/* For deny ACEs we change the mask so that subsequent allow access
	   control entries do not turn on the bits we are denying */
	if (type == ACCESS_DENIED) {
		if (flags & GENERIC_ALL)
			*pbits_to_set &= ~S_IRWXUGO;

		if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
			*pbits_to_set &= ~S_IWUGO;
		if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
			*pbits_to_set &= ~S_IRUGO;
		if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
			*pbits_to_set &= ~S_IXUGO;
		return;
	} else if (type != ACCESS_ALLOWED) {
		cERROR(1, "unknown access control type %d", type);
		return;
	}
	/* else ACCESS_ALLOWED type */

	if (flags & GENERIC_ALL) {
		*pmode |= (S_IRWXUGO & (*pbits_to_set));
		cFYI(DBG2, "all perms");
		return;
	}
	if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
		*pmode |= (S_IWUGO & (*pbits_to_set));
	if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
		*pmode |= (S_IRUGO & (*pbits_to_set));
	if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
		*pmode |= (S_IXUGO & (*pbits_to_set));

	cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
	return;
}
/*
   Generate access flags to reflect permissions; mode is the existing mode.
   This function is called for every ACE in the DACL whose SID matches
   with either owner or group or everyone.
*/
static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
				__u32 *pace_flags)
{
	/* reset access mask */
	*pace_flags = 0x0;

	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
	mode &= bits_to_use;

	/* check for R/W/X UGO since we do not know whose flags these are,
	   but we have cleared all the bits sans RWX for either user or
	   group or other as per bits_to_use */
	if (mode & S_IRUGO)
		*pace_flags |= SET_FILE_READ_RIGHTS;
	if (mode & S_IWUGO)
		*pace_flags |= SET_FILE_WRITE_RIGHTS;
	if (mode & S_IXUGO)
		*pace_flags |= SET_FILE_EXEC_RIGHTS;

	cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
	return;
}
static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
			const struct cifs_sid *psid, __u64 nmode, umode_t bits)
{
	int i;
	__u16 size = 0;
	__u32 access_req = 0;

	pntace->type = ACCESS_ALLOWED;
	pntace->flags = 0x0;
	mode_to_access_flags(nmode, bits, &access_req);
	if (!access_req)
		access_req = SET_MINIMUM_RIGHTS;
	pntace->access_req = cpu_to_le32(access_req);

	pntace->sid.revision = psid->revision;
	pntace->sid.num_subauth = psid->num_subauth;
	for (i = 0; i < 6; i++)
		pntace->sid.authority[i] = psid->authority[i];
	for (i = 0; i < psid->num_subauth; i++)
		pntace->sid.sub_auth[i] = psid->sub_auth[i];

	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
	pntace->size = cpu_to_le16(size);

	return size;
}
#ifdef CONFIG_CIFS_DEBUG2
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
	int num_subauth;

	/* validate that we do not go past end of acl */

	if (le16_to_cpu(pace->size) < 16) {
		cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
		return;
	}

	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cERROR(1, "ACL too small to parse ACE");
		return;
	}

	num_subauth = pace->sid.num_subauth;
	if (num_subauth) {
		int i;
		cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
			pace->sid.revision, pace->sid.num_subauth, pace->type,
			pace->flags, le16_to_cpu(pace->size));
		for (i = 0; i < num_subauth; ++i) {
			cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
				le32_to_cpu(pace->sid.sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
	}

	return;
}
#endif
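
/*
 * Walk the ACEs in the DACL and fold each allow/deny ACE whose SID matches
 * the owner, group, everyone, or authenticated-users SID into the rwx bits
 * of fattr->cf_mode.
 */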
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
		       struct cifs_fattr *fattr)
{
	int i;
	int num_aces = 0;
	int acl_size;
	char *acl_base;
	struct cifs_ace **ppace;

	/* BB need to add parm so we can store the SID BB */

	if (!pdacl) {
		/* no DACL in the security descriptor, set
		   all the permissions for user/group/other */
		fattr->cf_mode |= S_IRWXUGO;
		return;
	}

	/* validate that we do not go past end of acl */
	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
		cERROR(1, "ACL too small to parse DACL");
		return;
	}

	cFYI(DBG2, "DACL revision %d size %d num aces %d",
		le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
		le32_to_cpu(pdacl->num_aces));

	/* reset rwx permissions for user/group/other.
	   Also, if num_aces is 0 i.e. DACL has no ACEs,
	   user/group/other have no permissions */
	fattr->cf_mode &= ~(S_IRWXUGO);

	acl_base = (char *)pdacl;
	acl_size = sizeof(struct cifs_acl);

	num_aces = le32_to_cpu(pdacl->num_aces);
	if (num_aces > 0) {
		umode_t user_mask = S_IRWXU;
		umode_t group_mask = S_IRWXG;
		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
				GFP_KERNEL);
		if (!ppace) {
			cERROR(1, "DACL memory allocation error");
			return;
		}

		for (i = 0; i < num_aces; ++i) {
			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
			dump_ace(ppace[i], end_of_acl);
#endif
			if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &user_mask);
			if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &group_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);

/*			memcpy((void *)(&(cifscred->aces[i])),
				(void *)ppace[i],
				sizeof(struct cifs_ace)); */

			acl_base = (char *)ppace[i];
			acl_size = le16_to_cpu(ppace[i]->size);
		}

		kfree(ppace);
	}

	return;
}
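
/*
 * Build a DACL of three ACCESS_ALLOWED ACEs (owner, group, everyone) that
 * reflects the supplied mode bits, filling in the DACL size and ACE count.
 */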
static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
			struct cifs_sid *pgrpsid, __u64 nmode)
{
	u16 size = 0;
	struct cifs_acl *pnndacl;

	pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));

	size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
					pownersid, nmode, S_IRWXU);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
					pgrpsid, nmode, S_IRWXG);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
					&sid_everyone, nmode, S_IRWXO);

	pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
	pndacl->num_aces = cpu_to_le32(3);

	return 0;
}
static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
{
	/* BB need to add parm so we can store the SID BB */

	/* validate that we do not go past end of ACL - sid must be at least 8
	   bytes long (assuming no sub-auths - e.g. the null SID) */
	if (end_of_acl < (char *)psid + 8) {
		cERROR(1, "ACL too small to parse SID %p", psid);
		return -EINVAL;
	}

	if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
		int i;
		cFYI(1, "SID revision %d num_auth %d",
			psid->revision, psid->num_subauth);

		for (i = 0; i < psid->num_subauth; i++) {
			cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
				le32_to_cpu(psid->sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
		cFYI(1, "RID 0x%x",
			le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
#endif
	}

	return 0;
}
/* Convert CIFS ACL to POSIX form */
static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
		struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
{
	int rc = 0;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
	char *end_of_acl = ((char *)pntsd) + acl_len;
	__u32 dacloffset;

	if (pntsd == NULL)
		return -EIO;

	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	dacloffset = le32_to_cpu(pntsd->dacloffset);
	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
	cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
		 "sacloffset 0x%x dacloffset 0x%x",
		 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
		 le32_to_cpu(pntsd->gsidoffset),
		 le32_to_cpu(pntsd->sacloffset), dacloffset);
/*	cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
	rc = parse_sid(owner_sid_ptr, end_of_acl);
	if (rc) {
		cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
	if (rc) {
		cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
		return rc;
	}

	rc = parse_sid(group_sid_ptr, end_of_acl);
	if (rc) {
		cFYI(1, "%s: Error %d parsing Group SID", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
	if (rc) {
		cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
		return rc;
	}

	if (dacloffset)
		parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
			   group_sid_ptr, fattr);
	else
		cFYI(1, "no ACL"); /* BB grant all or default perms? */

/*	cifscred->uid = owner_sid_ptr->rid;
	cifscred->gid = group_sid_ptr->rid;
	memcpy((void *)(&(cifscred->osid)), (void *)owner_sid_ptr,
			sizeof(struct cifs_sid));
	memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
			sizeof(struct cifs_sid)); */

	return rc;
}
/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
				struct inode *inode, __u64 nmode)
{
	int rc = 0;
	__u32 dacloffset;
	__u32 ndacloffset;
	__u32 sidsoffset;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */

	if ((inode == NULL) || (pntsd == NULL) || (pnntsd == NULL))
		return -EIO;

	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));

	dacloffset = le32_to_cpu(pntsd->dacloffset);
	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);

	ndacloffset = sizeof(struct cifs_ntsd);
	ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
	ndacl_ptr->revision = dacl_ptr->revision;
	ndacl_ptr->size = 0;
	ndacl_ptr->num_aces = 0;

	rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr, nmode);

	sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);

	/* copy security descriptor control portion and owner and group sid */
	copy_sec_desc(pntsd, pnntsd, sidsoffset);

	return rc;
}
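
/* Query the security descriptor of an already open file by its fid */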
static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
		__u16 fid, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int xid, rc;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	xid = GetXid();
	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
	FreeXid(xid);

	cifs_put_tlink(tlink);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}
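
/* Open the file with READ_CONTROL access just long enough to query its
   security descriptor, then close it */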
static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int oplock = 0;
	int xid, rc;
	__u16 fid;
	struct cifsTconInfo *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
			 &fid, &oplock, NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (!rc) {
		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
		CIFSSMBClose(xid, tcon, fid);
	}

	cifs_put_tlink(tlink);
	FreeXid(xid);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}
/* Retrieve an ACL from the server */
struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
				      struct inode *inode, const char *path,
				      u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	struct cifsFileInfo *open_file = NULL;

	if (inode)
		open_file = find_readable_file(CIFS_I(inode), true);
	if (!open_file)
		return get_cifs_acl_by_path(cifs_sb, path, pacllen);

	pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
	cifsFileInfo_put(open_file);
	return pntsd;
}
static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
		struct cifs_ntsd *pnntsd, u32 acllen)
{
	int xid, rc;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	xid = GetXid();
	rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen);
	FreeXid(xid);
	cifs_put_tlink(tlink);

	cFYI(DBG2, "SetCIFSACL rc = %d", rc);
	return rc;
}
static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
		struct cifs_ntsd *pnntsd, u32 acllen)
{
	int oplock = 0;
	int xid, rc;
	__u16 fid;
	struct cifsTconInfo *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0,
			 &fid, &oplock, NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		cERROR(1, "Unable to open file to set ACL");
		goto out;
	}

	rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen);
	cFYI(DBG2, "SetCIFSACL rc = %d", rc);

	CIFSSMBClose(xid, tcon, fid);
out:
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/* Set an ACL on the server */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
			struct inode *inode, const char *path)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *open_file;
	int rc;

	cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);

	open_file = find_readable_file(CIFS_I(inode), true);
	if (!open_file)
		return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);

	rc = set_cifs_acl_by_fid(cifs_sb, open_file->netfid, pnntsd, acllen);
	cifsFileInfo_put(open_file);
	return rc;
}
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
		  struct inode *inode, const char *path, const __u16 *pfid)
{
	struct cifs_ntsd *pntsd = NULL;
	u32 acllen = 0;
	int rc = 0;

	cFYI(DBG2, "converting ACL to mode for %s", path);

	if (pfid)
		pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
	else
		pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);

	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
		kfree(pntsd);
		if (rc)
			cERROR(1, "parse sec desc failed rc = %d", rc);
	}

	return rc;
}
/* Convert mode bits to an ACL so we can update the ACL on the server */
int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
{
	int rc = 0;
	__u32 secdesclen = 0;
	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */

	cFYI(DBG2, "set ACL from mode for %s", path);

	/* Get the security descriptor */
	pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);

	/* Add three ACEs for owner, group, everyone getting rid of
	   other ACEs as chmod disables ACEs and set the security descriptor */

	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		/* allocate memory for the smb header,
		   set security descriptor request security descriptor
		   parameters, and security descriptor itself */

		secdesclen = secdesclen < DEFSECDESCLEN ?
					DEFSECDESCLEN : secdesclen;
		pnntsd = kmalloc(secdesclen, GFP_KERNEL);
		if (!pnntsd) {
			cERROR(1, "Unable to allocate security descriptor");
			kfree(pntsd);
			return -ENOMEM;
		}

		rc = build_sec_desc(pntsd, pnntsd, inode, nmode);

		cFYI(DBG2, "build_sec_desc rc: %d", rc);

		if (!rc) {
			/* Set the security descriptor */
			rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
			cFYI(DBG2, "set_cifs_acl rc: %d", rc);
		}

		kfree(pnntsd);
		kfree(pntsd);
	}

	return rc;
}