#include "ceph_debug.h"
#include <linux/xattr.h>
#include <linux/slab.h>
static bool ceph_is_valid_xattr(const char *name)
{
        return !strncmp(name, "ceph.", 5) ||
               !strncmp(name, XATTR_SECURITY_PREFIX,
                        XATTR_SECURITY_PREFIX_LEN) ||
               !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
               !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
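/*
 * For illustration: the check above accepts names under the "ceph.",
 * "security.", "trusted." and "user." prefixes (for example the virtual
 * "ceph.dir.rbytes" defined below, or a hypothetical user attribute such as
 * "user.project"), and rejects anything else, such as an unprefixed "myattr".
 */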
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr_cb {
        bool readonly;
        char *name;
        size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
                              size_t size);
};
static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
                                    size_t size)
{
        return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
                                  size_t size)
{
        return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
                                    size_t size)
{
        return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
                                     size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
                                     size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rbytes);
}

static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
                        (long)ci->i_rctime.tv_nsec);
}
static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
        { true, "ceph.dir.entries", ceph_vxattrcb_entries },
        { true, "ceph.dir.files", ceph_vxattrcb_files },
        { true, "ceph.dir.subdirs", ceph_vxattrcb_subdirs },
        { true, "ceph.dir.rentries", ceph_vxattrcb_rentries },
        { true, "ceph.dir.rfiles", ceph_vxattrcb_rfiles },
        { true, "ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs },
        { true, "ceph.dir.rbytes", ceph_vxattrcb_rbytes },
        { true, "ceph.dir.rctime", ceph_vxattrcb_rctime },
        { true, NULL, NULL }
};
static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        int ret;

        ret = snprintf(val, size,
                "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
                (unsigned long long)ceph_file_layout_su(ci->i_layout),
                (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
                (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
        if (ceph_file_layout_pg_preferred(ci->i_layout))
                ret += snprintf(val + ret, size, "preferred_osd=%lld\n",
                            (unsigned long long)ceph_file_layout_pg_preferred(
                                    ci->i_layout));
        return ret;
}
static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
        { true, "ceph.layout", ceph_vxattrcb_layout },
        { true, NULL, NULL }
};
static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode))
                return ceph_dir_vxattrs;
        else if (S_ISREG(inode->i_mode))
                return ceph_file_vxattrs;
        return NULL;
}
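/*
 * For illustration, the virtual xattrs defined above surface to userspace
 * through the ordinary xattr interface; a rough sketch of reading the
 * recursive statistics on a kernel-mounted CephFS directory (the mount
 * point /mnt/ceph and the directory path are hypothetical):
 *
 *      getfattr -n ceph.dir.rbytes /mnt/ceph/some/dir    # recursive bytes
 *      getfattr -n ceph.dir.rfiles /mnt/ceph/some/dir    # recursive files
 *      getfattr -n ceph.dir.rctime /mnt/ceph/some/dir    # latest recursive ctime
 *
 * Each value is formatted by the matching ceph_vxattrcb_* helper above; the
 * readonly flag on these entries is checked in ceph_setxattr() and
 * ceph_removexattr() below.
 */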
static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,

                if (strcmp(vxattr->name, name) == 0)

        } while (vxattr->name);
static int __set_xattr(struct ceph_inode_info *ci,
                       const char *name, int name_len,
                       const char *val, int val_len,
                       int dirty,
                       int should_free_name, int should_free_val,
                       struct ceph_inode_xattr **newxattr)

        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;

        p = &ci->i_xattrs.index.rb_node;

                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, min(name_len, xattr->name_len));

                        if (name_len == xattr->name_len)

                        else if (name_len < xattr->name_len)

                xattr->name_len = name_len;
                xattr->should_free_name = should_free_name;

                ci->i_xattrs.count++;
                dout("__set_xattr count=%d\n", ci->i_xattrs.count);

                if (xattr->should_free_val)
                        kfree((void *)xattr->val);

                if (should_free_name) {

                ci->i_xattrs.names_size -= xattr->name_len;
                ci->i_xattrs.vals_size -= xattr->val_len;

        ci->i_xattrs.names_size += name_len;
        ci->i_xattrs.vals_size += val_len;

        xattr->val_len = val_len;
        xattr->dirty = dirty;
        xattr->should_free_val = (val && should_free_val);

                rb_link_node(&xattr->node, parent, p);
                rb_insert_color(&xattr->node, &ci->i_xattrs.index);
                dout("__set_xattr_val p=%p\n", p);

        dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
             ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,

        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;

        p = &ci->i_xattrs.index.rb_node;

                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, xattr->name_len);

                        dout("__get_xattr %s: found %.*s\n", name,
                             xattr->val_len, xattr->val);

        dout("__get_xattr %s: not found\n", name);
static void __free_xattr(struct ceph_inode_xattr *xattr)

        if (xattr->should_free_name)
                kfree((void *)xattr->name);
        if (xattr->should_free_val)
                kfree((void *)xattr->val);
static int __remove_xattr(struct ceph_inode_info *ci,
                          struct ceph_inode_xattr *xattr)

        rb_erase(&xattr->node, &ci->i_xattrs.index);

        if (xattr->should_free_name)
                kfree((void *)xattr->name);
        if (xattr->should_free_val)
                kfree((void *)xattr->val);

        ci->i_xattrs.names_size -= xattr->name_len;
        ci->i_xattrs.vals_size -= xattr->val_len;
        ci->i_xattrs.count--;
static int __remove_xattr_by_name(struct ceph_inode_info *ci,

        struct ceph_inode_xattr *xattr;

        p = &ci->i_xattrs.index.rb_node;
        xattr = __get_xattr(ci, name);
        err = __remove_xattr(ci, xattr);
static char *__copy_xattr_names(struct ceph_inode_info *ci,

        struct ceph_inode_xattr *xattr = NULL;

        p = rb_first(&ci->i_xattrs.index);
        dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

                xattr = rb_entry(p, struct ceph_inode_xattr, node);
                memcpy(dest, xattr->name, xattr->name_len);
                dest[xattr->name_len] = '\0';

                dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
                     xattr->name_len, ci->i_xattrs.names_size);

                dest += xattr->name_len + 1;
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)

        struct rb_node *p, *tmp;
        struct ceph_inode_xattr *xattr = NULL;

        p = rb_first(&ci->i_xattrs.index);

        dout("__ceph_destroy_xattrs p=%p\n", p);

                xattr = rb_entry(p, struct ceph_inode_xattr, node);

                dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
                     xattr->name_len, xattr->name);
                rb_erase(tmp, &ci->i_xattrs.index);

        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.index_version = 0;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.index = RB_ROOT;
static int __build_xattrs(struct inode *inode)
        __releases(inode->i_lock)
        __acquires(inode->i_lock)

        const char *name, *val;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_xattr **xattrs = NULL;

        dout("__build_xattrs() len=%d\n",
             ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

        if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
                return 0; /* already built */

        __ceph_destroy_xattrs(ci);
        /* update the internal xattr rb tree */
        if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
                p = ci->i_xattrs.blob->vec.iov_base;
                end = p + ci->i_xattrs.blob->vec.iov_len;
                ceph_decode_32_safe(&p, end, numattr, bad);
                xattr_version = ci->i_xattrs.version;
                spin_unlock(&inode->i_lock);

                xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),

                memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
                for (i = 0; i < numattr; i++) {
                        xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),

                spin_lock(&inode->i_lock);
                if (ci->i_xattrs.version != xattr_version) {
                        /* lost a race, retry */
                        for (i = 0; i < numattr; i++)

                        ceph_decode_32_safe(&p, end, len, bad);

                        ceph_decode_32_safe(&p, end, len, bad);

                        err = __set_xattr(ci, name, namelen, val, len,
                                          0, 0, 0, &xattrs[numattr]);

        ci->i_xattrs.index_version = ci->i_xattrs.version;
        ci->i_xattrs.dirty = false;

        spin_lock(&inode->i_lock);

        for (i = 0; i < numattr; i++)

        ci->i_xattrs.names_size = 0;
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
        /*
         * 4 bytes for the xattr count at the start of the blob, plus an
         * additional 4 bytes (the length word) for each xattr name and
         * 4 bytes for each value.
         */
        int size = 4 + ci->i_xattrs.count*(4 + 4) +
                ci->i_xattrs.names_size +
                ci->i_xattrs.vals_size;
        dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
             ci->i_xattrs.count, ci->i_xattrs.names_size,
             ci->i_xattrs.vals_size);

                size += 4 + 4 + name_size + val_size;
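/*
 * Worked example (hypothetical numbers, for illustration only): an inode
 * that currently caches one xattr, "user.a" = "abc", needs
 *
 *      4 + 1 * (4 + 4) + 6 + 3 = 21 bytes
 *
 * (count word, per-xattr length words, name bytes, value bytes).  Asking
 * __get_required_blob_size() for room to add "user.note" = "hello" on top
 * of that adds 4 + 4 + 9 + 5 = 22 more bytes, for a 43-byte blob.
 */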
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)

        struct ceph_inode_xattr *xattr = NULL;

        dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
        if (ci->i_xattrs.dirty) {
                int need = __get_required_blob_size(ci, 0, 0);

                BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

                p = rb_first(&ci->i_xattrs.index);
                dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

                ceph_encode_32(&dest, ci->i_xattrs.count);

                        xattr = rb_entry(p, struct ceph_inode_xattr, node);

                        ceph_encode_32(&dest, xattr->name_len);
                        memcpy(dest, xattr->name, xattr->name_len);
                        dest += xattr->name_len;
                        ceph_encode_32(&dest, xattr->val_len);
                        memcpy(dest, xattr->val, xattr->val_len);
                        dest += xattr->val_len;

                /* adjust buffer len; it may be larger than we need */
                ci->i_xattrs.prealloc_blob->vec.iov_len =
                        dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = NULL;
                ci->i_xattrs.dirty = false;
                ci->i_xattrs.version++;
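/*
 * Sketch of the encoding produced above (and consumed by __build_xattrs()),
 * using a hypothetical single xattr "user.a" = "abc"; the integers are
 * 32-bit little-endian values as written by ceph_encode_32():
 *
 *      u32 count    = 1
 *      u32 name_len = 6    followed by the bytes "user.a" (no NUL)
 *      u32 val_len  = 3    followed by the bytes "abc"    (no NUL)
 *
 * i.e. 4 + (4 + 6) + (4 + 3) = 21 bytes, matching __get_required_blob_size().
 */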
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,

        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
        struct ceph_inode_xattr *xattr;
        struct ceph_vxattr_cb *vxattr = NULL;

        if (!ceph_is_valid_xattr(name))

        /* let's see if a virtual xattr was requested */
                vxattr = ceph_match_vxattr(vxattrs, name);

        spin_lock(&inode->i_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);

        if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {

                spin_unlock(&inode->i_lock);
                /* get xattrs from mds (if we don't already have them) */
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);

        spin_lock(&inode->i_lock);

        if (vxattr && vxattr->readonly) {
                err = vxattr->getxattr_cb(ci, value, size);

        err = __build_xattrs(inode);

        err = -ENODATA;  /* == ENOATTR */
        xattr = __get_xattr(ci, name);

                err = vxattr->getxattr_cb(ci, value, size);

        if (size && size < xattr->val_len)

        err = xattr->val_len;

        memcpy(value, xattr->val, xattr->val_len);

        spin_unlock(&inode->i_lock);
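/*
 * Rough userspace sketch of the usual two-step probe that the size handling
 * above supports, per the getxattr(2) contract: a zero size asks only for
 * the value length, and a too-small buffer is rejected with ERANGE.  The
 * path and attribute name are hypothetical, and error handling is omitted;
 * note that the returned value is not NUL-terminated.
 *
 *      ssize_t len = getxattr("/mnt/ceph/dir", "ceph.dir.rbytes", NULL, 0);
 *      if (len >= 0) {
 *              char *buf = malloc(len + 1);
 *              if (buf && getxattr("/mnt/ceph/dir", "ceph.dir.rbytes",
 *                                  buf, len) == len)
 *                      buf[len] = '\0';
 *      }
 */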
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)

        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);

        spin_lock(&inode->i_lock);
        dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);

        if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {

                spin_unlock(&inode->i_lock);
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);

        spin_lock(&inode->i_lock);

        err = __build_xattrs(inode);

        /* include virtual dir xattrs */
                for (i = 0; vxattrs[i].name; i++)
                        vir_namelen += strlen(vxattrs[i].name) + 1;
        /* add 1 byte per name for the trailing '\0' */
        namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;

        if (size && namelen > size)

        names = __copy_xattr_names(ci, names);

        /* virtual xattr names, too */
                for (i = 0; vxattrs[i].name; i++) {
                        len = sprintf(names, "%s", vxattrs[i].name);

        spin_unlock(&inode->i_lock);
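/*
 * The list assembled above follows the normal listxattr(2) format: the
 * stored names are copied first, then the virtual names are appended, each
 * written back to back as a NUL-terminated string.  A hypothetical directory
 * with one stored xattr "user.a" would yield something like
 *
 *      "user.a\0ceph.dir.entries\0ceph.dir.files\0...\0ceph.dir.rctime\0"
 *
 * and a zero-size call returns only the total length so the caller can size
 * its buffer first.
 */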
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
                              const char *value, size_t size, int flags)

        struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct inode *parent_inode = dentry->d_parent->d_inode;
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = &client->mdsc;

        struct page **pages = NULL;

        /* copy value into some pages */
        nr_pages = calc_pages_for(0, size);

                pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);

                for (i = 0; i < nr_pages; i++) {
                        pages[i] = __page_cache_alloc(GFP_NOFS);

                        kaddr = kmap(pages[i]);
                        memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
                               min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));

        dout("setxattr value=%.*s\n", (int)size, value);

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,

        req->r_inode = igrab(inode);
        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

        req->r_args.setxattr.flags = cpu_to_le32(flags);
        req->r_path2 = kstrdup(name, GFP_NOFS);

        req->r_pages = pages;
        req->r_num_pages = nr_pages;
        req->r_data_len = size;

        dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        ceph_mdsc_put_request(req);
        dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

                for (i = 0; i < nr_pages; i++)
                        __free_page(pages[i]);
int ceph_setxattr(struct dentry *dentry, const char *name,
                  const void *value, size_t size, int flags)

        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);

        int name_len = strlen(name);
        char *newname = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int required_blob_size;

        if (ceph_snap(inode) != CEPH_NOSNAP)

        if (!ceph_is_valid_xattr(name))

                struct ceph_vxattr_cb *vxattr =
                        ceph_match_vxattr(vxattrs, name);
                if (vxattr && vxattr->readonly)

        /* preallocate memory for xattr name, value, index node */

        newname = kmalloc(name_len + 1, GFP_NOFS);

        memcpy(newname, name, name_len + 1);

                newval = kmalloc(val_len + 1, GFP_NOFS);

                memcpy(newval, value, val_len);
                newval[val_len] = '\0';

        xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);

        spin_lock(&inode->i_lock);

        issued = __ceph_caps_issued(ci, NULL);
        if (!(issued & CEPH_CAP_XATTR_EXCL))

        __build_xattrs(inode);

        required_blob_size = __get_required_blob_size(ci, name_len, val_len);

        if (!ci->i_xattrs.prealloc_blob ||
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob = NULL;

                spin_unlock(&inode->i_lock);
747 dout(" preaallocating new blob size=%d\n", required_blob_size
);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);

                spin_lock(&inode->i_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;

        dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
        err = __set_xattr(ci, newname, name_len, newval,
                          val_len, 1, 1, 1, &xattr);
        __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
        spin_unlock(&inode->i_lock);

        spin_unlock(&inode->i_lock);
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
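/*
 * Rough userspace view of the two paths above (the path and attribute name
 * are hypothetical):
 *
 *      setxattr("/mnt/ceph/f", "user.backup", "yes", 3, XATTR_CREATE);
 *
 * If this client holds the XATTR_EXCL capability, the new value is applied
 * to the local rb-tree and only the dirty cap is flushed back later;
 * otherwise the value is copied into pages and sent synchronously to the
 * MDS via ceph_sync_setxattr(), with XATTR_CREATE/XATTR_REPLACE passed
 * through in r_args.setxattr.flags.
 */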
static int ceph_send_removexattr(struct dentry *dentry, const char *name)

        struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct inode *inode = dentry->d_inode;
        struct inode *parent_inode = dentry->d_parent->d_inode;
        struct ceph_mds_request *req;

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,

        req->r_inode = igrab(inode);
        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

        req->r_path2 = kstrdup(name, GFP_NOFS);

        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        ceph_mdsc_put_request(req);
int ceph_removexattr(struct dentry *dentry, const char *name)

        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);

        if (ceph_snap(inode) != CEPH_NOSNAP)

        if (!ceph_is_valid_xattr(name))

                struct ceph_vxattr_cb *vxattr =
                        ceph_match_vxattr(vxattrs, name);
                if (vxattr && vxattr->readonly)

        spin_lock(&inode->i_lock);
        __build_xattrs(inode);
        issued = __ceph_caps_issued(ci, NULL);
        dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

        if (!(issued & CEPH_CAP_XATTR_EXCL))

        err = __remove_xattr_by_name(ceph_inode(inode), name);
        __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;

        spin_unlock(&inode->i_lock);

        spin_unlock(&inode->i_lock);
        err = ceph_send_removexattr(dentry, name);