fs/ceph/xattr.c
#include <linux/ceph/ceph_debug.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

#include <linux/xattr.h>
#include <linux/slab.h>
#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)

static bool ceph_is_valid_xattr(const char *name)
{
	return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
	       !strncmp(name, XATTR_SECURITY_PREFIX,
			XATTR_SECURITY_PREFIX_LEN) ||
	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
	char *name;
	size_t name_size;	/* strlen(name) + 1 (for '\0') */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
	bool readonly;
};
/* directories */

static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
				      size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rbytes);
}
static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	/* seconds.nanoseconds; zero-pad the ns field to nine digits */
	return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
			(long)ci->i_rctime.tv_nsec);
}
#define CEPH_XATTR_NAME(_type, _name)	XATTR_CEPH_PREFIX #_type "." #_name

#define XATTR_NAME_CEPH(_type, _name) \
	{ \
		.name = CEPH_XATTR_NAME(_type, _name), \
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
		.readonly = true, \
	}

static struct ceph_vxattr ceph_dir_vxattrs[] = {
	XATTR_NAME_CEPH(dir, entries),
	XATTR_NAME_CEPH(dir, files),
	XATTR_NAME_CEPH(dir, subdirs),
	XATTR_NAME_CEPH(dir, rentries),
	XATTR_NAME_CEPH(dir, rfiles),
	XATTR_NAME_CEPH(dir, rsubdirs),
	XATTR_NAME_CEPH(dir, rbytes),
	XATTR_NAME_CEPH(dir, rctime),
	{ 0 }	/* Required table terminator */
};
static size_t ceph_dir_vxattrs_name_size;	/* total size of all names */
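
/*
 * Example, for illustration only: CEPH_XATTR_NAME(dir, entries) expands to
 * the string "ceph.dir.entries", so the table above exposes names such as
 * "ceph.dir.entries", "ceph.dir.rbytes" and "ceph.dir.rctime".  Userspace
 * can read them like any other xattr, e.g. with getxattr(2) or getfattr(1):
 *
 *	getfattr -n ceph.dir.rbytes <mountpoint>/some/dir
 */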

/* files */

static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	int ret;

	ret = snprintf(val, size,
		"chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
		(unsigned long long)ceph_file_layout_su(ci->i_layout),
		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
	return ret;
}

static struct ceph_vxattr ceph_file_vxattrs[] = {
	XATTR_NAME_CEPH(file, layout),
	/* The following extended attribute name is deprecated */
	{
		.name = XATTR_CEPH_PREFIX "layout",
		.name_size = sizeof (XATTR_CEPH_PREFIX "layout"),
		.getxattr_cb = ceph_vxattrcb_file_layout,
		.readonly = true,
	},
	{ 0 }	/* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size;	/* total size of all names */
static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return ceph_dir_vxattrs;
	else if (S_ISREG(inode->i_mode))
		return ceph_file_vxattrs;
	return NULL;
}

static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
	if (vxattrs == ceph_dir_vxattrs)
		return ceph_dir_vxattrs_name_size;
	if (vxattrs == ceph_file_vxattrs)
		return ceph_file_vxattrs_name_size;
	BUG();

	return 0;
}
/*
 * Compute the aggregate size (including terminating '\0') of all
 * virtual extended attribute names in the given vxattr table.
 */
static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
	struct ceph_vxattr *vxattr;
	size_t size = 0;

	for (vxattr = vxattrs; vxattr->name; vxattr++)
		size += vxattr->name_size;

	return size;
}

/* Routines called at initialization and exit time */

void __init ceph_xattr_init(void)
{
	ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
	ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
}

void ceph_xattr_exit(void)
{
	ceph_dir_vxattrs_name_size = 0;
	ceph_file_vxattrs_name_size = 0;
}
static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
					     const char *name)
{
	struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);

	if (vxattr) {
		while (vxattr->name) {
			if (!strcmp(vxattr->name, name))
				return vxattr;
			vxattr++;
		}
	}

	return NULL;
}
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int dirty,
		       int should_free_name, int should_free_val,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;
	}

	if (!xattr) {
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = should_free_name;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);

		if (should_free_name) {
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";

	xattr->val_len = val_len;
	xattr->dirty = dirty;
	xattr->should_free_val = (val && should_free_val);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

	return 0;
}
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}
static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	kfree(xattr);
}
static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -EOPNOTSUPP;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}
static int __remove_xattr_by_name(struct ceph_inode_info *ci,
				  const char *name)
{
	struct rb_node **p;
	struct ceph_inode_xattr *xattr;
	int err;

	p = &ci->i_xattrs.index.rb_node;
	xattr = __get_xattr(ci, name);
	err = __remove_xattr(ci, xattr);
	return err;
}
static char *__copy_xattr_names(struct ceph_inode_info *ci,
				char *dest)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		memcpy(dest, xattr->name, xattr->name_len);
		dest[xattr->name_len] = '\0';

		dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
		     xattr->name_len, ci->i_xattrs.names_size);

		dest += xattr->name_len + 1;
		p = rb_next(p);
	}

	return dest;
}
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}
static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

start:
	/* updated internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		spin_unlock(&ci->i_ceph_lock);

		xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;
		memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;
		}

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			xattrs = NULL;
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;

			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, 0, &xattrs[numattr]);

			if (err < 0)
				goto bad;
		}
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	return err;
bad_lock:
	spin_lock(&ci->i_ceph_lock);
bad:
	if (xattrs) {
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the length, and additional 4 bytes per each xattr name,
	 * 4 bytes per each value
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
			ci->i_xattrs.names_size +
			ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}
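
/*
 * Worked example, for illustration only: with a single cached xattr
 * "user.foo" = "bar" we have count == 1, names_size == 8 and vals_size == 3,
 * so the existing entries need 4 + 1*(4 + 4) + 8 + 3 = 23 bytes; a pending
 * new name/value pair adds another 4 + 4 + name_size + val_size on top.
 */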
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}
}
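
/*
 * Sketch of the resulting blob layout, mirroring the decode loop in
 * __build_xattrs() above:
 *
 *	__le32 count
 *	repeated count times:
 *		__le32 name_len, followed by name_len name bytes (no '\0')
 *		__le32 val_len, followed by val_len value bytes
 */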
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
		      size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr = NULL;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto get_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	if (vxattr && vxattr->readonly) {
		err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

get_xattr:
	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr) {
		if (vxattr)
			err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto list_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

list_xattr:
	/*
	 * Start with virtual dir xattr names (if any) (including
	 * terminating '\0' characters for each).
	 */
	vir_namelen = ceph_vxattrs_name_size(vxattrs);

	/* add 1 byte per name for its null termination */
	namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && namelen > size)
		goto out;

	err = namelen;
	if (size == 0)
		goto out;

	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++) {
			len = sprintf(names, "%s", vxattrs[i].name);
			names += len + 1;
		}

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	int err;
	int i, nr_pages;
	struct page **pages = NULL;
	void *kaddr;

	/* copy value into some pages */
	nr_pages = calc_pages_for(0, size);
	if (nr_pages) {
		pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
		if (!pages)
			return -ENOMEM;
		err = -ENOMEM;
		for (i = 0; i < nr_pages; i++) {
			pages[i] = __page_cache_alloc(GFP_NOFS);
			if (!pages[i]) {
				nr_pages = i;
				goto out;
			}
			kaddr = kmap(pages[i]);
			memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
			       min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
		}
	}

	dout("setxattr value=%.*s\n", (int)size, value);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_args.setxattr.flags = cpu_to_le32(flags);
	req->r_path2 = kstrdup(name, GFP_NOFS);

	req->r_pages = pages;
	req->r_num_pages = nr_pages;
	req->r_data_len = size;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	parent_inode = ceph_get_dentry_parent_inode(dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pages) {
		for (i = 0; i < nr_pages; i++)
			__free_page(pages[i]);
		kfree(pages);
	}
	return err;
}
int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued;
	int err;
	int dirty;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __set_xattr(ci, newname, name_len, newval,
			  val_len, 1, 1, 1, &xattr);

	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;

	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct inode *parent_inode;
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_path2 = kstrdup(name, GFP_NOFS);

	parent_inode = ceph_get_dentry_parent_inode(dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	ceph_mdsc_put_request(req);
	return err;
}
int ceph_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued;
	int err;
	int required_blob_size;
	int dirty;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	err = -ENOMEM;
	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, 0, 0);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __remove_xattr_by_name(ceph_inode(inode), name);

	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;
	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;
do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_send_removexattr(dentry, name);
out:
	return err;
}