1 #include "ceph_debug.h"
5 #include <linux/xattr.h>
6 #include <linux/slab.h>
8 static bool ceph_is_valid_xattr(const char *name
)
10 return !strncmp(name
, "ceph.", 5) ||
11 !strncmp(name
, XATTR_SECURITY_PREFIX
,
12 XATTR_SECURITY_PREFIX_LEN
) ||
13 !strncmp(name
, XATTR_TRUSTED_PREFIX
, XATTR_TRUSTED_PREFIX_LEN
) ||
14 !strncmp(name
, XATTR_USER_PREFIX
, XATTR_USER_PREFIX_LEN
);
18 * These define virtual xattrs exposing the recursive directory
19 * statistics and layout metadata.
21 struct ceph_vxattr_cb
{
24 size_t (*getxattr_cb
)(struct ceph_inode_info
*ci
, char *val
,
30 static size_t ceph_vxattrcb_entries(struct ceph_inode_info
*ci
, char *val
,
33 return snprintf(val
, size
, "%lld", ci
->i_files
+ ci
->i_subdirs
);
36 static size_t ceph_vxattrcb_files(struct ceph_inode_info
*ci
, char *val
,
39 return snprintf(val
, size
, "%lld", ci
->i_files
);
42 static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info
*ci
, char *val
,
45 return snprintf(val
, size
, "%lld", ci
->i_subdirs
);
48 static size_t ceph_vxattrcb_rentries(struct ceph_inode_info
*ci
, char *val
,
51 return snprintf(val
, size
, "%lld", ci
->i_rfiles
+ ci
->i_rsubdirs
);
54 static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info
*ci
, char *val
,
57 return snprintf(val
, size
, "%lld", ci
->i_rfiles
);
60 static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info
*ci
, char *val
,
63 return snprintf(val
, size
, "%lld", ci
->i_rsubdirs
);
66 static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info
*ci
, char *val
,
69 return snprintf(val
, size
, "%lld", ci
->i_rbytes
);
72 static size_t ceph_vxattrcb_rctime(struct ceph_inode_info
*ci
, char *val
,
75 return snprintf(val
, size
, "%ld.%ld", (long)ci
->i_rctime
.tv_sec
,
76 (long)ci
->i_rctime
.tv_nsec
);
79 static struct ceph_vxattr_cb ceph_dir_vxattrs
[] = {
80 { true, "ceph.dir.entries", ceph_vxattrcb_entries
},
81 { true, "ceph.dir.files", ceph_vxattrcb_files
},
82 { true, "ceph.dir.subdirs", ceph_vxattrcb_subdirs
},
83 { true, "ceph.dir.rentries", ceph_vxattrcb_rentries
},
84 { true, "ceph.dir.rfiles", ceph_vxattrcb_rfiles
},
85 { true, "ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs
},
86 { true, "ceph.dir.rbytes", ceph_vxattrcb_rbytes
},
87 { true, "ceph.dir.rctime", ceph_vxattrcb_rctime
},
93 static size_t ceph_vxattrcb_layout(struct ceph_inode_info
*ci
, char *val
,
98 ret
= snprintf(val
, size
,
99 "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
100 (unsigned long long)ceph_file_layout_su(ci
->i_layout
),
101 (unsigned long long)ceph_file_layout_stripe_count(ci
->i_layout
),
102 (unsigned long long)ceph_file_layout_object_size(ci
->i_layout
));
103 if (ceph_file_layout_pg_preferred(ci
->i_layout
))
104 ret
+= snprintf(val
+ ret
, size
, "preferred_osd=%lld\n",
105 (unsigned long long)ceph_file_layout_pg_preferred(
110 static struct ceph_vxattr_cb ceph_file_vxattrs
[] = {
111 { true, "ceph.layout", ceph_vxattrcb_layout
},
115 static struct ceph_vxattr_cb
*ceph_inode_vxattrs(struct inode
*inode
)
117 if (S_ISDIR(inode
->i_mode
))
118 return ceph_dir_vxattrs
;
119 else if (S_ISREG(inode
->i_mode
))
120 return ceph_file_vxattrs
;
124 static struct ceph_vxattr_cb
*ceph_match_vxattr(struct ceph_vxattr_cb
*vxattr
,
128 if (strcmp(vxattr
->name
, name
) == 0)
131 } while (vxattr
->name
);
135 static int __set_xattr(struct ceph_inode_info
*ci
,
136 const char *name
, int name_len
,
137 const char *val
, int val_len
,
139 int should_free_name
, int should_free_val
,
140 struct ceph_inode_xattr
**newxattr
)
143 struct rb_node
*parent
= NULL
;
144 struct ceph_inode_xattr
*xattr
= NULL
;
148 p
= &ci
->i_xattrs
.index
.rb_node
;
151 xattr
= rb_entry(parent
, struct ceph_inode_xattr
, node
);
152 c
= strncmp(name
, xattr
->name
, min(name_len
, xattr
->name_len
));
158 if (name_len
== xattr
->name_len
)
160 else if (name_len
< xattr
->name_len
)
172 xattr
->name_len
= name_len
;
173 xattr
->should_free_name
= should_free_name
;
175 ci
->i_xattrs
.count
++;
176 dout("__set_xattr count=%d\n", ci
->i_xattrs
.count
);
180 if (xattr
->should_free_val
)
181 kfree((void *)xattr
->val
);
183 if (should_free_name
) {
187 ci
->i_xattrs
.names_size
-= xattr
->name_len
;
188 ci
->i_xattrs
.vals_size
-= xattr
->val_len
;
190 ci
->i_xattrs
.names_size
+= name_len
;
191 ci
->i_xattrs
.vals_size
+= val_len
;
197 xattr
->val_len
= val_len
;
198 xattr
->dirty
= dirty
;
199 xattr
->should_free_val
= (val
&& should_free_val
);
202 rb_link_node(&xattr
->node
, parent
, p
);
203 rb_insert_color(&xattr
->node
, &ci
->i_xattrs
.index
);
204 dout("__set_xattr_val p=%p\n", p
);
207 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
208 ceph_vinop(&ci
->vfs_inode
), xattr
, name
, val_len
, val
);
213 static struct ceph_inode_xattr
*__get_xattr(struct ceph_inode_info
*ci
,
217 struct rb_node
*parent
= NULL
;
218 struct ceph_inode_xattr
*xattr
= NULL
;
221 p
= &ci
->i_xattrs
.index
.rb_node
;
224 xattr
= rb_entry(parent
, struct ceph_inode_xattr
, node
);
225 c
= strncmp(name
, xattr
->name
, xattr
->name_len
);
231 dout("__get_xattr %s: found %.*s\n", name
,
232 xattr
->val_len
, xattr
->val
);
237 dout("__get_xattr %s: not found\n", name
);
242 static void __free_xattr(struct ceph_inode_xattr
*xattr
)
246 if (xattr
->should_free_name
)
247 kfree((void *)xattr
->name
);
248 if (xattr
->should_free_val
)
249 kfree((void *)xattr
->val
);
254 static int __remove_xattr(struct ceph_inode_info
*ci
,
255 struct ceph_inode_xattr
*xattr
)
260 rb_erase(&xattr
->node
, &ci
->i_xattrs
.index
);
262 if (xattr
->should_free_name
)
263 kfree((void *)xattr
->name
);
264 if (xattr
->should_free_val
)
265 kfree((void *)xattr
->val
);
267 ci
->i_xattrs
.names_size
-= xattr
->name_len
;
268 ci
->i_xattrs
.vals_size
-= xattr
->val_len
;
269 ci
->i_xattrs
.count
--;
/*
 * Look up an xattr by name and remove it from ci's index.  Returns the
 * result of __remove_xattr (an error if the name was not present).
 * Caller must hold i_lock.
 */
static int __remove_xattr_by_name(struct ceph_inode_info *ci,
				  const char *name)
{
	struct ceph_inode_xattr *xattr;
	int err;

	/* the rb_node pointer local of the original was never read; dropped */
	xattr = __get_xattr(ci, name);
	err = __remove_xattr(ci, xattr);
	return err;
}
288 static char *__copy_xattr_names(struct ceph_inode_info
*ci
,
292 struct ceph_inode_xattr
*xattr
= NULL
;
294 p
= rb_first(&ci
->i_xattrs
.index
);
295 dout("__copy_xattr_names count=%d\n", ci
->i_xattrs
.count
);
298 xattr
= rb_entry(p
, struct ceph_inode_xattr
, node
);
299 memcpy(dest
, xattr
->name
, xattr
->name_len
);
300 dest
[xattr
->name_len
] = '\0';
302 dout("dest=%s %p (%s) (%d/%d)\n", dest
, xattr
, xattr
->name
,
303 xattr
->name_len
, ci
->i_xattrs
.names_size
);
305 dest
+= xattr
->name_len
+ 1;
312 void __ceph_destroy_xattrs(struct ceph_inode_info
*ci
)
314 struct rb_node
*p
, *tmp
;
315 struct ceph_inode_xattr
*xattr
= NULL
;
317 p
= rb_first(&ci
->i_xattrs
.index
);
319 dout("__ceph_destroy_xattrs p=%p\n", p
);
322 xattr
= rb_entry(p
, struct ceph_inode_xattr
, node
);
325 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p
,
326 xattr
->name_len
, xattr
->name
);
327 rb_erase(tmp
, &ci
->i_xattrs
.index
);
332 ci
->i_xattrs
.names_size
= 0;
333 ci
->i_xattrs
.vals_size
= 0;
334 ci
->i_xattrs
.index_version
= 0;
335 ci
->i_xattrs
.count
= 0;
336 ci
->i_xattrs
.index
= RB_ROOT
;
339 static int __build_xattrs(struct inode
*inode
)
345 const char *name
, *val
;
346 struct ceph_inode_info
*ci
= ceph_inode(inode
);
348 struct ceph_inode_xattr
**xattrs
= NULL
;
352 dout("__build_xattrs() len=%d\n",
353 ci
->i_xattrs
.blob
? (int)ci
->i_xattrs
.blob
->vec
.iov_len
: 0);
355 if (ci
->i_xattrs
.index_version
>= ci
->i_xattrs
.version
)
356 return 0; /* already built */
358 __ceph_destroy_xattrs(ci
);
361 /* updated internal xattr rb tree */
362 if (ci
->i_xattrs
.blob
&& ci
->i_xattrs
.blob
->vec
.iov_len
> 4) {
363 p
= ci
->i_xattrs
.blob
->vec
.iov_base
;
364 end
= p
+ ci
->i_xattrs
.blob
->vec
.iov_len
;
365 ceph_decode_32_safe(&p
, end
, numattr
, bad
);
366 xattr_version
= ci
->i_xattrs
.version
;
367 spin_unlock(&inode
->i_lock
);
369 xattrs
= kcalloc(numattr
, sizeof(struct ceph_xattr
*),
374 memset(xattrs
, 0, numattr
*sizeof(struct ceph_xattr
*));
375 for (i
= 0; i
< numattr
; i
++) {
376 xattrs
[i
] = kmalloc(sizeof(struct ceph_inode_xattr
),
382 spin_lock(&inode
->i_lock
);
383 if (ci
->i_xattrs
.version
!= xattr_version
) {
384 /* lost a race, retry */
385 for (i
= 0; i
< numattr
; i
++)
392 ceph_decode_32_safe(&p
, end
, len
, bad
);
396 ceph_decode_32_safe(&p
, end
, len
, bad
);
400 err
= __set_xattr(ci
, name
, namelen
, val
, len
,
401 0, 0, 0, &xattrs
[numattr
]);
408 ci
->i_xattrs
.index_version
= ci
->i_xattrs
.version
;
409 ci
->i_xattrs
.dirty
= false;
413 spin_lock(&inode
->i_lock
);
416 for (i
= 0; i
< numattr
; i
++)
420 ci
->i_xattrs
.names_size
= 0;
424 static int __get_required_blob_size(struct ceph_inode_info
*ci
, int name_size
,
428 * 4 bytes for the length, and additional 4 bytes per each xattr name,
429 * 4 bytes per each value
431 int size
= 4 + ci
->i_xattrs
.count
*(4 + 4) +
432 ci
->i_xattrs
.names_size
+
433 ci
->i_xattrs
.vals_size
;
434 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
435 ci
->i_xattrs
.count
, ci
->i_xattrs
.names_size
,
436 ci
->i_xattrs
.vals_size
);
439 size
+= 4 + 4 + name_size
+ val_size
;
445 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
446 * and swap into place.
448 void __ceph_build_xattrs_blob(struct ceph_inode_info
*ci
)
451 struct ceph_inode_xattr
*xattr
= NULL
;
454 dout("__build_xattrs_blob %p\n", &ci
->vfs_inode
);
455 if (ci
->i_xattrs
.dirty
) {
456 int need
= __get_required_blob_size(ci
, 0, 0);
458 BUG_ON(need
> ci
->i_xattrs
.prealloc_blob
->alloc_len
);
460 p
= rb_first(&ci
->i_xattrs
.index
);
461 dest
= ci
->i_xattrs
.prealloc_blob
->vec
.iov_base
;
463 ceph_encode_32(&dest
, ci
->i_xattrs
.count
);
465 xattr
= rb_entry(p
, struct ceph_inode_xattr
, node
);
467 ceph_encode_32(&dest
, xattr
->name_len
);
468 memcpy(dest
, xattr
->name
, xattr
->name_len
);
469 dest
+= xattr
->name_len
;
470 ceph_encode_32(&dest
, xattr
->val_len
);
471 memcpy(dest
, xattr
->val
, xattr
->val_len
);
472 dest
+= xattr
->val_len
;
477 /* adjust buffer len; it may be larger than we need */
478 ci
->i_xattrs
.prealloc_blob
->vec
.iov_len
=
479 dest
- ci
->i_xattrs
.prealloc_blob
->vec
.iov_base
;
481 if (ci
->i_xattrs
.blob
)
482 ceph_buffer_put(ci
->i_xattrs
.blob
);
483 ci
->i_xattrs
.blob
= ci
->i_xattrs
.prealloc_blob
;
484 ci
->i_xattrs
.prealloc_blob
= NULL
;
485 ci
->i_xattrs
.dirty
= false;
489 ssize_t
ceph_getxattr(struct dentry
*dentry
, const char *name
, void *value
,
492 struct inode
*inode
= dentry
->d_inode
;
493 struct ceph_inode_info
*ci
= ceph_inode(inode
);
494 struct ceph_vxattr_cb
*vxattrs
= ceph_inode_vxattrs(inode
);
496 struct ceph_inode_xattr
*xattr
;
497 struct ceph_vxattr_cb
*vxattr
= NULL
;
499 if (!ceph_is_valid_xattr(name
))
502 /* let's see if a virtual xattr was requested */
504 vxattr
= ceph_match_vxattr(vxattrs
, name
);
506 spin_lock(&inode
->i_lock
);
507 dout("getxattr %p ver=%lld index_ver=%lld\n", inode
,
508 ci
->i_xattrs
.version
, ci
->i_xattrs
.index_version
);
510 if (__ceph_caps_issued_mask(ci
, CEPH_CAP_XATTR_SHARED
, 1) &&
511 (ci
->i_xattrs
.index_version
>= ci
->i_xattrs
.version
)) {
514 spin_unlock(&inode
->i_lock
);
515 /* get xattrs from mds (if we don't already have them) */
516 err
= ceph_do_getattr(inode
, CEPH_STAT_CAP_XATTR
);
521 spin_lock(&inode
->i_lock
);
523 if (vxattr
&& vxattr
->readonly
) {
524 err
= vxattr
->getxattr_cb(ci
, value
, size
);
528 err
= __build_xattrs(inode
);
533 err
= -ENODATA
; /* == ENOATTR */
534 xattr
= __get_xattr(ci
, name
);
537 err
= vxattr
->getxattr_cb(ci
, value
, size
);
542 if (size
&& size
< xattr
->val_len
)
545 err
= xattr
->val_len
;
549 memcpy(value
, xattr
->val
, xattr
->val_len
);
552 spin_unlock(&inode
->i_lock
);
556 ssize_t
ceph_listxattr(struct dentry
*dentry
, char *names
, size_t size
)
558 struct inode
*inode
= dentry
->d_inode
;
559 struct ceph_inode_info
*ci
= ceph_inode(inode
);
560 struct ceph_vxattr_cb
*vxattrs
= ceph_inode_vxattrs(inode
);
567 spin_lock(&inode
->i_lock
);
568 dout("listxattr %p ver=%lld index_ver=%lld\n", inode
,
569 ci
->i_xattrs
.version
, ci
->i_xattrs
.index_version
);
571 if (__ceph_caps_issued_mask(ci
, CEPH_CAP_XATTR_SHARED
, 1) &&
572 (ci
->i_xattrs
.index_version
>= ci
->i_xattrs
.version
)) {
575 spin_unlock(&inode
->i_lock
);
576 err
= ceph_do_getattr(inode
, CEPH_STAT_CAP_XATTR
);
581 spin_lock(&inode
->i_lock
);
583 err
= __build_xattrs(inode
);
589 /* include virtual dir xattrs */
591 for (i
= 0; vxattrs
[i
].name
; i
++)
592 vir_namelen
+= strlen(vxattrs
[i
].name
) + 1;
593 /* adding 1 byte per each variable due to the null termination */
594 namelen
= vir_namelen
+ ci
->i_xattrs
.names_size
+ ci
->i_xattrs
.count
;
596 if (size
&& namelen
> size
)
603 names
= __copy_xattr_names(ci
, names
);
605 /* virtual xattr names, too */
607 for (i
= 0; vxattrs
[i
].name
; i
++) {
608 len
= sprintf(names
, "%s", vxattrs
[i
].name
);
613 spin_unlock(&inode
->i_lock
);
617 static int ceph_sync_setxattr(struct dentry
*dentry
, const char *name
,
618 const char *value
, size_t size
, int flags
)
620 struct ceph_client
*client
= ceph_sb_to_client(dentry
->d_sb
);
621 struct inode
*inode
= dentry
->d_inode
;
622 struct ceph_inode_info
*ci
= ceph_inode(inode
);
623 struct inode
*parent_inode
= dentry
->d_parent
->d_inode
;
624 struct ceph_mds_request
*req
;
625 struct ceph_mds_client
*mdsc
= &client
->mdsc
;
628 struct page
**pages
= NULL
;
631 /* copy value into some pages */
632 nr_pages
= calc_pages_for(0, size
);
634 pages
= kmalloc(sizeof(pages
[0])*nr_pages
, GFP_NOFS
);
638 for (i
= 0; i
< nr_pages
; i
++) {
639 pages
[i
] = __page_cache_alloc(GFP_NOFS
);
644 kaddr
= kmap(pages
[i
]);
645 memcpy(kaddr
, value
+ i
*PAGE_CACHE_SIZE
,
646 min(PAGE_CACHE_SIZE
, size
-i
*PAGE_CACHE_SIZE
));
650 dout("setxattr value=%.*s\n", (int)size
, value
);
653 req
= ceph_mdsc_create_request(mdsc
, CEPH_MDS_OP_SETXATTR
,
659 req
->r_inode
= igrab(inode
);
660 req
->r_inode_drop
= CEPH_CAP_XATTR_SHARED
;
662 req
->r_args
.setxattr
.flags
= cpu_to_le32(flags
);
663 req
->r_path2
= kstrdup(name
, GFP_NOFS
);
665 req
->r_pages
= pages
;
666 req
->r_num_pages
= nr_pages
;
667 req
->r_data_len
= size
;
669 dout("xattr.ver (before): %lld\n", ci
->i_xattrs
.version
);
670 err
= ceph_mdsc_do_request(mdsc
, parent_inode
, req
);
671 ceph_mdsc_put_request(req
);
672 dout("xattr.ver (after): %lld\n", ci
->i_xattrs
.version
);
676 for (i
= 0; i
< nr_pages
; i
++)
677 __free_page(pages
[i
]);
683 int ceph_setxattr(struct dentry
*dentry
, const char *name
,
684 const void *value
, size_t size
, int flags
)
686 struct inode
*inode
= dentry
->d_inode
;
687 struct ceph_inode_info
*ci
= ceph_inode(inode
);
688 struct ceph_vxattr_cb
*vxattrs
= ceph_inode_vxattrs(inode
);
690 int name_len
= strlen(name
);
692 char *newname
= NULL
;
694 struct ceph_inode_xattr
*xattr
= NULL
;
696 int required_blob_size
;
698 if (ceph_snap(inode
) != CEPH_NOSNAP
)
701 if (!ceph_is_valid_xattr(name
))
705 struct ceph_vxattr_cb
*vxattr
=
706 ceph_match_vxattr(vxattrs
, name
);
707 if (vxattr
&& vxattr
->readonly
)
711 /* preallocate memory for xattr name, value, index node */
713 newname
= kmalloc(name_len
+ 1, GFP_NOFS
);
716 memcpy(newname
, name
, name_len
+ 1);
719 newval
= kmalloc(val_len
+ 1, GFP_NOFS
);
722 memcpy(newval
, value
, val_len
);
723 newval
[val_len
] = '\0';
726 xattr
= kmalloc(sizeof(struct ceph_inode_xattr
), GFP_NOFS
);
730 spin_lock(&inode
->i_lock
);
732 issued
= __ceph_caps_issued(ci
, NULL
);
733 if (!(issued
& CEPH_CAP_XATTR_EXCL
))
735 __build_xattrs(inode
);
737 required_blob_size
= __get_required_blob_size(ci
, name_len
, val_len
);
739 if (!ci
->i_xattrs
.prealloc_blob
||
740 required_blob_size
> ci
->i_xattrs
.prealloc_blob
->alloc_len
) {
741 struct ceph_buffer
*blob
= NULL
;
743 spin_unlock(&inode
->i_lock
);
744 dout(" preaallocating new blob size=%d\n", required_blob_size
);
745 blob
= ceph_buffer_new(required_blob_size
, GFP_NOFS
);
748 spin_lock(&inode
->i_lock
);
749 if (ci
->i_xattrs
.prealloc_blob
)
750 ceph_buffer_put(ci
->i_xattrs
.prealloc_blob
);
751 ci
->i_xattrs
.prealloc_blob
= blob
;
755 dout("setxattr %p issued %s\n", inode
, ceph_cap_string(issued
));
756 err
= __set_xattr(ci
, newname
, name_len
, newval
,
757 val_len
, 1, 1, 1, &xattr
);
758 __ceph_mark_dirty_caps(ci
, CEPH_CAP_XATTR_EXCL
);
759 ci
->i_xattrs
.dirty
= true;
760 inode
->i_ctime
= CURRENT_TIME
;
761 spin_unlock(&inode
->i_lock
);
766 spin_unlock(&inode
->i_lock
);
767 err
= ceph_sync_setxattr(dentry
, name
, value
, size
, flags
);
775 static int ceph_send_removexattr(struct dentry
*dentry
, const char *name
)
777 struct ceph_client
*client
= ceph_sb_to_client(dentry
->d_sb
);
778 struct ceph_mds_client
*mdsc
= &client
->mdsc
;
779 struct inode
*inode
= dentry
->d_inode
;
780 struct inode
*parent_inode
= dentry
->d_parent
->d_inode
;
781 struct ceph_mds_request
*req
;
784 req
= ceph_mdsc_create_request(mdsc
, CEPH_MDS_OP_RMXATTR
,
788 req
->r_inode
= igrab(inode
);
789 req
->r_inode_drop
= CEPH_CAP_XATTR_SHARED
;
791 req
->r_path2
= kstrdup(name
, GFP_NOFS
);
793 err
= ceph_mdsc_do_request(mdsc
, parent_inode
, req
);
794 ceph_mdsc_put_request(req
);
798 int ceph_removexattr(struct dentry
*dentry
, const char *name
)
800 struct inode
*inode
= dentry
->d_inode
;
801 struct ceph_inode_info
*ci
= ceph_inode(inode
);
802 struct ceph_vxattr_cb
*vxattrs
= ceph_inode_vxattrs(inode
);
806 if (ceph_snap(inode
) != CEPH_NOSNAP
)
809 if (!ceph_is_valid_xattr(name
))
813 struct ceph_vxattr_cb
*vxattr
=
814 ceph_match_vxattr(vxattrs
, name
);
815 if (vxattr
&& vxattr
->readonly
)
819 spin_lock(&inode
->i_lock
);
820 __build_xattrs(inode
);
821 issued
= __ceph_caps_issued(ci
, NULL
);
822 dout("removexattr %p issued %s\n", inode
, ceph_cap_string(issued
));
824 if (!(issued
& CEPH_CAP_XATTR_EXCL
))
827 err
= __remove_xattr_by_name(ceph_inode(inode
), name
);
828 __ceph_mark_dirty_caps(ci
, CEPH_CAP_XATTR_EXCL
);
829 ci
->i_xattrs
.dirty
= true;
830 inode
->i_ctime
= CURRENT_TIME
;
832 spin_unlock(&inode
->i_lock
);
836 spin_unlock(&inode
->i_lock
);
837 err
= ceph_send_removexattr(dentry
, name
);