/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2008 Oracle.  All rights reserved.
 *
 * Lots of code in this file is copied from linux/fs/ext3/xattr.c.
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/security.h>

#define MLOG_MASK_PREFIX ML_XATTR
#include <cluster/masklog.h>

#include "blockcheck.h"
#include "buffer_head_io.h"
#include "refcounttree.h"
struct ocfs2_xattr_def_value_root {
	struct ocfs2_xattr_value_root	xv;
	struct ocfs2_extent_rec		er;
};
struct ocfs2_xattr_bucket {
	/* The inode these xattrs are associated with */
	struct inode *bu_inode;

	/* The actual buffers that make up the bucket */
	struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];

	/* How many blocks make up one bucket for this filesystem */
	int bu_blocks;
};
struct ocfs2_xattr_set_ctxt {
	handle_t *handle;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
};
#define OCFS2_XATTR_ROOT_SIZE	(sizeof(struct ocfs2_xattr_def_value_root))
#define OCFS2_XATTR_INLINE_SIZE	80
#define OCFS2_XATTR_HEADER_GAP	4
#define OCFS2_XATTR_FREE_IN_IBODY	(OCFS2_MIN_XATTR_INLINE_SIZE \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)
#define OCFS2_XATTR_FREE_IN_BLOCK(ptr)	((ptr)->i_sb->s_blocksize \
					 - sizeof(struct ocfs2_xattr_block) \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)
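
/*
 * Illustrative sketch only (this helper is not used anywhere in this
 * file): it shows how the free-space macros above combine with the
 * OCFS2_XATTR_SIZE() padding macro (assumed here, from ocfs2_fs.h) to
 * answer "would a new name/value pair fit in the inode body?".  It
 * mirrors the s_size check done in ocfs2_calc_security_init() below,
 * which compares against OCFS2_XATTR_FREE_IN_IBODY, i.e. against the
 * minimum possible inline area.
 */
static inline int ocfs2_xattr_example_fits_ibody(int name_len,
						 size_t value_len)
{
	/* Entry header plus padded name, plus either the padded inline
	 * value or, for large values, the value tree root. */
	size_t need = sizeof(struct ocfs2_xattr_entry) +
		      OCFS2_XATTR_SIZE(name_len);

	if (value_len > OCFS2_XATTR_INLINE_SIZE)
		need += OCFS2_XATTR_ROOT_SIZE;
	else
		need += OCFS2_XATTR_SIZE(value_len);

	return need <= OCFS2_XATTR_FREE_IN_IBODY;
}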
static struct ocfs2_xattr_def_value_root def_xv = {
	.xv.xr_list.l_count = cpu_to_le16(1),
};
struct xattr_handler *ocfs2_xattr_handlers[] = {
	&ocfs2_xattr_user_handler,
	&ocfs2_xattr_acl_access_handler,
	&ocfs2_xattr_acl_default_handler,
	&ocfs2_xattr_trusted_handler,
	&ocfs2_xattr_security_handler,
	NULL
};
static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
	[OCFS2_XATTR_INDEX_USER]	= &ocfs2_xattr_user_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]
					= &ocfs2_xattr_acl_access_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT]
					= &ocfs2_xattr_acl_default_handler,
	[OCFS2_XATTR_INDEX_TRUSTED]	= &ocfs2_xattr_trusted_handler,
	[OCFS2_XATTR_INDEX_SECURITY]	= &ocfs2_xattr_security_handler,
};
struct ocfs2_xattr_info {
	int name_index;
	const char *name;
	const void *value;
	size_t value_len;
};
struct ocfs2_xattr_search {
	struct buffer_head *inode_bh;
	/*
	 * xattr_bh points to the block buffer head which holds the extended
	 * attributes; when the extended attributes live in the inode,
	 * xattr_bh is equal to inode_bh.
	 */
	struct buffer_head *xattr_bh;
	struct ocfs2_xattr_header *header;
	struct ocfs2_xattr_bucket *bucket;
	void *base;
	void *end;
	struct ocfs2_xattr_entry *here;
	int not_found;
};
static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
					     struct ocfs2_xattr_header *xh,

static int ocfs2_xattr_block_find(struct inode *inode,
				  struct ocfs2_xattr_search *xs);
static int ocfs2_xattr_index_block_find(struct inode *inode,
					struct buffer_head *root_bh,
					struct ocfs2_xattr_search *xs);

static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
					     struct buffer_head *blk_bh,

static int ocfs2_xattr_create_index_block(struct inode *inode,
					  struct ocfs2_xattr_search *xs,
					  struct ocfs2_xattr_set_ctxt *ctxt);

static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
					     struct ocfs2_xattr_info *xi,
					     struct ocfs2_xattr_search *xs,
					     struct ocfs2_xattr_set_ctxt *ctxt);

typedef int (xattr_tree_rec_func)(struct inode *inode,
				  struct buffer_head *root_bh,
				  u64 blkno, u32 cpos, u32 len, void *para);
static int ocfs2_iterate_xattr_index_block(struct inode *inode,
					   struct buffer_head *root_bh,
					   xattr_tree_rec_func *rec_func,
static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
					struct ocfs2_xattr_bucket *bucket,
static int ocfs2_rm_xattr_cluster(struct inode *inode,
				  struct buffer_head *root_bh,

static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
				  u64 src_blk, u64 last_blk, u64 to_blk,
				  unsigned int start_bucket,
static int ocfs2_prepare_refcount_xattr(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_xattr_info *xi,
					struct ocfs2_xattr_search *xis,
					struct ocfs2_xattr_search *xbs,
					struct ocfs2_refcount_tree **ref_tree,
static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
					   struct ocfs2_xattr_bucket *bucket,
					   struct ocfs2_xattr_value_root **xv,
					   struct buffer_head **bh);
static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
{
	return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE;
}

static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
{
	return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
}

static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb)
{
	u16 len = sb->s_blocksize -
		  offsetof(struct ocfs2_xattr_header, xh_entries);

	return len / sizeof(struct ocfs2_xattr_entry);
}
#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))
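
/*
 * For orientation (illustrative numbers, assuming the 4KB
 * OCFS2_XATTR_BUCKET_SIZE from ocfs2_fs.h): on a volume with 4KB blocks
 * and 128KB clusters, ocfs2_xattr_buckets_per_cluster() is
 * 131072 / 4096 = 32 and ocfs2_blocks_per_xattr_bucket() is
 * 4096 / 4096 = 1, so bucket_block(b, 0) is the entire bucket and
 * bucket_xh() is the header at its start.  With 512-byte blocks a
 * bucket instead spans 8 buffer heads, which is why bu_bhs[] has
 * OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET slots.
 */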
static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode)
{
	struct ocfs2_xattr_bucket *bucket;
	int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);

	BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET);

	bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
	if (bucket) {
		bucket->bu_inode = inode;
		bucket->bu_blocks = blks;
	}

	return bucket;
}
static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)
{
	int i;

	for (i = 0; i < bucket->bu_blocks; i++) {
		brelse(bucket->bu_bhs[i]);
		bucket->bu_bhs[i] = NULL;
	}
}
static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket)
{
	ocfs2_xattr_bucket_relse(bucket);
	bucket->bu_inode = NULL;
	kfree(bucket);
}
 * A bucket that has never been written to disk doesn't need to be
 * read.  We just need the buffer_heads.  Don't call this for
 * buckets that are already on disk.  ocfs2_read_xattr_bucket() initializes
 * them fully.
 */
static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,

	for (i = 0; i < bucket->bu_blocks; i++) {
		bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,

		if (!bucket->bu_bhs[i]) {

		if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
			ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),

		ocfs2_xattr_bucket_relse(bucket);
/* Read the xattr bucket at xb_blkno */
static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,

	rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
			       bucket->bu_blocks, bucket->bu_bhs, 0,

	spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
	rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
					 &bucket_xh(bucket)->xh_check);
	spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);

		ocfs2_xattr_bucket_relse(bucket);
static int ocfs2_xattr_bucket_journal_access(handle_t *handle,
					     struct ocfs2_xattr_bucket *bucket,

	for (i = 0; i < bucket->bu_blocks; i++) {
		rc = ocfs2_journal_access(handle,
					  INODE_CACHE(bucket->bu_inode),
					  bucket->bu_bhs[i], type);
static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle,
					     struct ocfs2_xattr_bucket *bucket)

	spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
	ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
				   bucket->bu_bhs, bucket->bu_blocks,
				   &bucket_xh(bucket)->xh_check);
	spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);

	for (i = 0; i < bucket->bu_blocks; i++)
		ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
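
/*
 * Note on the locking above: the bucket's single xh_check in its header
 * covers every block of the bucket, so both the read path
 * (ocfs2_read_xattr_bucket(), via ocfs2_validate_meta_ecc_bhs()) and the
 * dirty path here (ocfs2_compute_meta_ecc_bhs()) wrap the whole-bucket
 * ECC operation in osb_xattr_lock, presumably so that the multi-block
 * check is computed and verified consistently before the individual
 * buffer heads are journal-dirtied.
 */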
static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest,
					 struct ocfs2_xattr_bucket *src)

	int blocksize = src->bu_inode->i_sb->s_blocksize;

	BUG_ON(dest->bu_blocks != src->bu_blocks);
	BUG_ON(dest->bu_inode != src->bu_inode);

	for (i = 0; i < src->bu_blocks; i++) {
		memcpy(bucket_block(dest, i), bucket_block(src, i),
static int ocfs2_validate_xattr_block(struct super_block *sb,
				      struct buffer_head *bh)

	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)bh->b_data;

	mlog(0, "Validating xattr block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check);

	/*
	 * Errors after here are fatal
	 */
	if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
			    "Extended attribute block #%llu has bad "
			    (unsigned long long)bh->b_blocknr, 7,

	if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
			    "Extended attribute block #%llu has an "
			    "invalid xb_blkno of %llu",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(xb->xb_blkno));

	if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
			    "Extended attribute block #%llu has an invalid "
			    "xb_fs_generation of #%u",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(xb->xb_fs_generation));
static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
				  struct buffer_head **bh)

	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp,
			      ocfs2_validate_xattr_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
static inline const char *ocfs2_xattr_prefix(int name_index)
{
	struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < OCFS2_XATTR_MAX)
		handler = ocfs2_xattr_handler_map[name_index];

	return handler ? handler->prefix : NULL;
}
static u32 ocfs2_xattr_name_hash(struct inode *inode,

	/* Get hash value of uuid from super block */
	u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash;

	/* hash extended attribute name */
	for (i = 0; i < name_len; i++) {
		hash = (hash << OCFS2_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^
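
/*
 * The loop above is a simple rotate-and-xor rolling hash: each step
 * rotates the 32-bit hash left by OCFS2_HASH_SHIFT bits (the shift-left
 * xor-ed with the complementary shift-right) and then mixes in
 * successive bytes of the attribute name.  Seeding it with the volume's
 * uuid_hash means the same name hashes differently on different
 * volumes while staying stable within one volume, which keeps the
 * bucket placement of a given attribute deterministic per filesystem.
 */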
 * ocfs2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static void ocfs2_xattr_hash_entry(struct inode *inode,
				   struct ocfs2_xattr_header *header,
				   struct ocfs2_xattr_entry *entry)

	char *name = (char *)header + le16_to_cpu(entry->xe_name_offset);

	hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len);
	entry->xe_name_hash = cpu_to_le32(hash);
static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
{
	int size = 0;

	if (value_len <= OCFS2_XATTR_INLINE_SIZE)
		size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
	else
		size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
	size += sizeof(struct ocfs2_xattr_entry);

	return size;
}
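
/*
 * Worked example (assuming OCFS2_XATTR_SIZE() rounds up to a 4-byte
 * boundary, as defined in ocfs2_fs.h): for a 13-byte name with a
 * 20-byte value, the entry needs OCFS2_XATTR_SIZE(13) +
 * OCFS2_XATTR_SIZE(20) = 16 + 20 = 36 bytes of name/value storage plus
 * the 16-byte struct ocfs2_xattr_entry.  If the value were 2000 bytes
 * (> OCFS2_XATTR_INLINE_SIZE), the inline value storage is replaced by
 * OCFS2_XATTR_ROOT_SIZE, the on-disk root of the value's extent tree.
 */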
int ocfs2_calc_security_init(struct inode *dir,
			     struct ocfs2_security_xattr_info *si,
			     struct ocfs2_alloc_context **xattr_ac)

	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	int s_size = ocfs2_xattr_entry_real_size(strlen(si->name),

	/*
	 * The max space of a security xattr taken inline is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
	 * so reserving one metadata block for it is ok.
	 */
	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
	    s_size > OCFS2_XATTR_FREE_IN_IBODY) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac);

		*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;

	/* reserve clusters for xattr value which will be set in B tree */
	if (si->value_len > OCFS2_XATTR_INLINE_SIZE) {
		int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,

		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
		*want_clusters += new_clusters;
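
/*
 * The 352-byte figure in the comment above breaks down as follows
 * (sizes per ocfs2_fs.h): a name of up to 255 bytes padded by
 * OCFS2_XATTR_SIZE() to 256, at most OCFS2_XATTR_INLINE_SIZE (80) bytes
 * of inline value (larger values move to an external value tree), and a
 * 16-byte struct ocfs2_xattr_entry: 256 + 80 + 16 = 352, which always
 * fits in a single extra metadata block.
 */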
535 int ocfs2_calc_xattr_init(struct inode
*dir
,
536 struct buffer_head
*dir_bh
,
538 struct ocfs2_security_xattr_info
*si
,
544 struct ocfs2_super
*osb
= OCFS2_SB(dir
->i_sb
);
545 int s_size
= 0, a_size
= 0, acl_len
= 0, new_clusters
;
548 s_size
= ocfs2_xattr_entry_real_size(strlen(si
->name
),
551 if (osb
->s_mount_opt
& OCFS2_MOUNT_POSIX_ACL
) {
552 acl_len
= ocfs2_xattr_get_nolock(dir
, dir_bh
,
553 OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT
,
556 a_size
= ocfs2_xattr_entry_real_size(0, acl_len
);
559 } else if (acl_len
!= 0 && acl_len
!= -ENODATA
) {
565 if (!(s_size
+ a_size
))
	 * The max space of a security xattr taken inline is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
	 * and the max space of an acl xattr taken inline is
	 * 80(value) + 16(entry) * 2(if directory) = 192 bytes.
	 * When blocksize = 512, we may reserve one more cluster for
	 * the xattr bucket, otherwise we reserve one metadata block.
	 *
	 * If this is a new directory with inline data,
	 * we choose to reserve the entire inline area for
	 * directory contents and force an external xattr block.
580 if (dir
->i_sb
->s_blocksize
== OCFS2_MIN_BLOCKSIZE
||
581 (S_ISDIR(mode
) && ocfs2_supports_inline_data(osb
)) ||
582 (s_size
+ a_size
) > OCFS2_XATTR_FREE_IN_IBODY
) {
583 *want_meta
= *want_meta
+ 1;
584 *xattr_credits
+= OCFS2_XATTR_BLOCK_CREATE_CREDITS
;
587 if (dir
->i_sb
->s_blocksize
== OCFS2_MIN_BLOCKSIZE
&&
588 (s_size
+ a_size
) > OCFS2_XATTR_FREE_IN_BLOCK(dir
)) {
590 *xattr_credits
+= ocfs2_blocks_per_xattr_bucket(dir
->i_sb
);
	 * reserve credits and clusters for xattrs which have large values
	 * and have to be set outside the inode
597 if (si
->enable
&& si
->value_len
> OCFS2_XATTR_INLINE_SIZE
) {
598 new_clusters
= ocfs2_clusters_for_bytes(dir
->i_sb
,
600 *xattr_credits
+= ocfs2_clusters_to_blocks(dir
->i_sb
,
602 *want_clusters
+= new_clusters
;
604 if (osb
->s_mount_opt
& OCFS2_MOUNT_POSIX_ACL
&&
605 acl_len
> OCFS2_XATTR_INLINE_SIZE
) {
606 /* for directory, it has DEFAULT and ACCESS two types of acls */
607 new_clusters
= (S_ISDIR(mode
) ? 2 : 1) *
608 ocfs2_clusters_for_bytes(dir
->i_sb
, acl_len
);
609 *xattr_credits
+= ocfs2_clusters_to_blocks(dir
->i_sb
,
611 *want_clusters
+= new_clusters
;
617 static int ocfs2_xattr_extend_allocation(struct inode
*inode
,
619 struct ocfs2_xattr_value_buf
*vb
,
620 struct ocfs2_xattr_set_ctxt
*ctxt
)
623 handle_t
*handle
= ctxt
->handle
;
624 enum ocfs2_alloc_restarted why
;
625 u32 prev_clusters
, logical_start
= le32_to_cpu(vb
->vb_xv
->xr_clusters
);
626 struct ocfs2_extent_tree et
;
628 mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add
);
630 ocfs2_init_xattr_value_extent_tree(&et
, INODE_CACHE(inode
), vb
);
632 status
= vb
->vb_access(handle
, INODE_CACHE(inode
), vb
->vb_bh
,
633 OCFS2_JOURNAL_ACCESS_WRITE
);
639 prev_clusters
= le32_to_cpu(vb
->vb_xv
->xr_clusters
);
640 status
= ocfs2_add_clusters_in_btree(handle
,
653 status
= ocfs2_journal_dirty(handle
, vb
->vb_bh
);
659 clusters_to_add
-= le32_to_cpu(vb
->vb_xv
->xr_clusters
) - prev_clusters
;
662 * We should have already allocated enough space before the transaction,
663 * so no need to restart.
665 BUG_ON(why
!= RESTART_NONE
|| clusters_to_add
);
672 static int __ocfs2_remove_xattr_range(struct inode
*inode
,
673 struct ocfs2_xattr_value_buf
*vb
,
674 u32 cpos
, u32 phys_cpos
, u32 len
,
675 unsigned int ext_flags
,
676 struct ocfs2_xattr_set_ctxt
*ctxt
)
679 u64 phys_blkno
= ocfs2_clusters_to_blocks(inode
->i_sb
, phys_cpos
);
680 handle_t
*handle
= ctxt
->handle
;
681 struct ocfs2_extent_tree et
;
683 ocfs2_init_xattr_value_extent_tree(&et
, INODE_CACHE(inode
), vb
);
685 ret
= vb
->vb_access(handle
, INODE_CACHE(inode
), vb
->vb_bh
,
686 OCFS2_JOURNAL_ACCESS_WRITE
);
692 ret
= ocfs2_remove_extent(handle
, &et
, cpos
, len
, ctxt
->meta_ac
,
699 le32_add_cpu(&vb
->vb_xv
->xr_clusters
, -len
);
701 ret
= ocfs2_journal_dirty(handle
, vb
->vb_bh
);
707 if (ext_flags
& OCFS2_EXT_REFCOUNTED
)
708 ret
= ocfs2_decrease_refcount(inode
, handle
,
709 ocfs2_blocks_to_clusters(inode
->i_sb
,
711 len
, ctxt
->meta_ac
, &ctxt
->dealloc
, 1);
713 ret
= ocfs2_cache_cluster_dealloc(&ctxt
->dealloc
,
722 static int ocfs2_xattr_shrink_size(struct inode
*inode
,
725 struct ocfs2_xattr_value_buf
*vb
,
726 struct ocfs2_xattr_set_ctxt
*ctxt
)
729 unsigned int ext_flags
;
730 u32 trunc_len
, cpos
, phys_cpos
, alloc_size
;
733 if (old_clusters
<= new_clusters
)
737 trunc_len
= old_clusters
- new_clusters
;
739 ret
= ocfs2_xattr_get_clusters(inode
, cpos
, &phys_cpos
,
741 &vb
->vb_xv
->xr_list
, &ext_flags
);
747 if (alloc_size
> trunc_len
)
748 alloc_size
= trunc_len
;
750 ret
= __ocfs2_remove_xattr_range(inode
, vb
, cpos
,
751 phys_cpos
, alloc_size
,
758 block
= ocfs2_clusters_to_blocks(inode
->i_sb
, phys_cpos
);
759 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode
),
762 trunc_len
-= alloc_size
;
769 static int ocfs2_xattr_value_truncate(struct inode
*inode
,
770 struct ocfs2_xattr_value_buf
*vb
,
772 struct ocfs2_xattr_set_ctxt
*ctxt
)
775 u32 new_clusters
= ocfs2_clusters_for_bytes(inode
->i_sb
, len
);
776 u32 old_clusters
= le32_to_cpu(vb
->vb_xv
->xr_clusters
);
778 if (new_clusters
== old_clusters
)
781 if (new_clusters
> old_clusters
)
782 ret
= ocfs2_xattr_extend_allocation(inode
,
783 new_clusters
- old_clusters
,
786 ret
= ocfs2_xattr_shrink_size(inode
,
787 old_clusters
, new_clusters
,
static int ocfs2_xattr_list_entry(char *buffer, size_t size,
				  size_t *result, const char *prefix,
				  const char *name, int name_len)

	char *p = buffer + *result;
	int prefix_len = strlen(prefix);
	int total_len = prefix_len + name_len + 1;

	*result += total_len;

	/* we are just looking for how big our buffer needs to be */

	memcpy(p, prefix, prefix_len);
	memcpy(p + prefix_len, name, name_len);
	p[prefix_len + name_len] = '\0';
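
/*
 * The "just looking for how big our buffer needs to be" path above
 * implements the standard listxattr sizing convention: when the caller
 * passes a NULL buffer, every entry still adds prefix_len + name_len + 1
 * (for the trailing NUL) to *result, but nothing is copied, so the
 * accumulated *result is exactly the buffer size a second call needs.
 */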
817 static int ocfs2_xattr_list_entries(struct inode
*inode
,
818 struct ocfs2_xattr_header
*header
,
819 char *buffer
, size_t buffer_size
)
823 const char *prefix
, *name
;
825 for (i
= 0 ; i
< le16_to_cpu(header
->xh_count
); i
++) {
826 struct ocfs2_xattr_entry
*entry
= &header
->xh_entries
[i
];
827 type
= ocfs2_xattr_get_type(entry
);
828 prefix
= ocfs2_xattr_prefix(type
);
831 name
= (const char *)header
+
832 le16_to_cpu(entry
->xe_name_offset
);
834 ret
= ocfs2_xattr_list_entry(buffer
, buffer_size
,
835 &result
, prefix
, name
,
845 int ocfs2_has_inline_xattr_value_outside(struct inode
*inode
,
846 struct ocfs2_dinode
*di
)
848 struct ocfs2_xattr_header
*xh
;
851 xh
= (struct ocfs2_xattr_header
*)
852 ((void *)di
+ inode
->i_sb
->s_blocksize
-
853 le16_to_cpu(di
->i_xattr_inline_size
));
855 for (i
= 0; i
< le16_to_cpu(xh
->xh_count
); i
++)
856 if (!ocfs2_xattr_is_local(&xh
->xh_entries
[i
]))
862 static int ocfs2_xattr_ibody_list(struct inode
*inode
,
863 struct ocfs2_dinode
*di
,
867 struct ocfs2_xattr_header
*header
= NULL
;
868 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
871 if (!(oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
))
874 header
= (struct ocfs2_xattr_header
*)
875 ((void *)di
+ inode
->i_sb
->s_blocksize
-
876 le16_to_cpu(di
->i_xattr_inline_size
));
878 ret
= ocfs2_xattr_list_entries(inode
, header
, buffer
, buffer_size
);
883 static int ocfs2_xattr_block_list(struct inode
*inode
,
884 struct ocfs2_dinode
*di
,
888 struct buffer_head
*blk_bh
= NULL
;
889 struct ocfs2_xattr_block
*xb
;
892 if (!di
->i_xattr_loc
)
895 ret
= ocfs2_read_xattr_block(inode
, le64_to_cpu(di
->i_xattr_loc
),
902 xb
= (struct ocfs2_xattr_block
*)blk_bh
->b_data
;
903 if (!(le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
)) {
904 struct ocfs2_xattr_header
*header
= &xb
->xb_attrs
.xb_header
;
905 ret
= ocfs2_xattr_list_entries(inode
, header
,
906 buffer
, buffer_size
);
908 ret
= ocfs2_xattr_tree_list_index_block(inode
, blk_bh
,
909 buffer
, buffer_size
);
ssize_t ocfs2_listxattr(struct dentry *dentry,

	int ret = 0, i_ret = 0, b_ret = 0;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(dentry->d_inode);

	if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb)))

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))

	ret = ocfs2_inode_lock(dentry->d_inode, &di_bh, 0);

	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_read(&oi->ip_xattr_sem);
	i_ret = ocfs2_xattr_ibody_list(dentry->d_inode, di, buffer, size);

		b_ret = ocfs2_xattr_block_list(dentry->d_inode, di,

	up_read(&oi->ip_xattr_sem);
	ocfs2_inode_unlock(dentry->d_inode, 0);

	return i_ret + b_ret;
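
/*
 * From user space this is reached through listxattr(2)/flistxattr(2),
 * which follow the same two-call pattern as the helpers above (sketch,
 * error handling omitted):
 *
 *	ssize_t need = listxattr(path, NULL, 0);
 *	char *names = malloc(need);
 *	listxattr(path, names, need);	// NUL-separated name list
 *
 * i_ret counts the bytes for in-inode names and b_ret those for names
 * in the external block, so their sum is the total the caller sees.
 */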
static int ocfs2_xattr_find_entry(int name_index,
				  struct ocfs2_xattr_search *xs)

	struct ocfs2_xattr_entry *entry;

	name_len = strlen(name);

	for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
		cmp = name_index - ocfs2_xattr_get_type(entry);
		cmp = name_len - entry->xe_name_len;
		cmp = memcmp(name, (xs->base +
				    le16_to_cpu(entry->xe_name_offset)),

	return cmp ? -ENODATA : 0;
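
/*
 * A match in the search above requires the attribute type (name_index
 * vs. ocfs2_xattr_get_type()), the name length, and the name bytes
 * stored at xe_name_offset to all compare equal; the cheap integer
 * comparisons come first so most non-matching entries can be rejected
 * before the memcmp(), and a non-zero cmp at the end maps to -ENODATA.
 */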
991 static int ocfs2_xattr_get_value_outside(struct inode
*inode
,
992 struct ocfs2_xattr_value_root
*xv
,
996 u32 cpos
, p_cluster
, num_clusters
, bpc
, clusters
;
999 size_t cplen
, blocksize
;
1000 struct buffer_head
*bh
= NULL
;
1001 struct ocfs2_extent_list
*el
;
1004 clusters
= le32_to_cpu(xv
->xr_clusters
);
1005 bpc
= ocfs2_clusters_to_blocks(inode
->i_sb
, 1);
1006 blocksize
= inode
->i_sb
->s_blocksize
;
1009 while (cpos
< clusters
) {
1010 ret
= ocfs2_xattr_get_clusters(inode
, cpos
, &p_cluster
,
1011 &num_clusters
, el
, NULL
);
1017 blkno
= ocfs2_clusters_to_blocks(inode
->i_sb
, p_cluster
);
1018 /* Copy ocfs2_xattr_value */
1019 for (i
= 0; i
< num_clusters
* bpc
; i
++, blkno
++) {
1020 ret
= ocfs2_read_block(INODE_CACHE(inode
), blkno
,
1027 cplen
= len
>= blocksize
? blocksize
: len
;
1028 memcpy(buffer
, bh
->b_data
, cplen
);
1037 cpos
+= num_clusters
;
1043 static int ocfs2_xattr_ibody_get(struct inode
*inode
,
1048 struct ocfs2_xattr_search
*xs
)
1050 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
1051 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)xs
->inode_bh
->b_data
;
1052 struct ocfs2_xattr_value_root
*xv
;
1056 if (!(oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
))
1059 xs
->end
= (void *)di
+ inode
->i_sb
->s_blocksize
;
1060 xs
->header
= (struct ocfs2_xattr_header
*)
1061 (xs
->end
- le16_to_cpu(di
->i_xattr_inline_size
));
1062 xs
->base
= (void *)xs
->header
;
1063 xs
->here
= xs
->header
->xh_entries
;
1065 ret
= ocfs2_xattr_find_entry(name_index
, name
, xs
);
1068 size
= le64_to_cpu(xs
->here
->xe_value_size
);
1070 if (size
> buffer_size
)
1072 if (ocfs2_xattr_is_local(xs
->here
)) {
1073 memcpy(buffer
, (void *)xs
->base
+
1074 le16_to_cpu(xs
->here
->xe_name_offset
) +
1075 OCFS2_XATTR_SIZE(xs
->here
->xe_name_len
), size
);
1077 xv
= (struct ocfs2_xattr_value_root
*)
1078 (xs
->base
+ le16_to_cpu(
1079 xs
->here
->xe_name_offset
) +
1080 OCFS2_XATTR_SIZE(xs
->here
->xe_name_len
));
1081 ret
= ocfs2_xattr_get_value_outside(inode
, xv
,
1093 static int ocfs2_xattr_block_get(struct inode
*inode
,
1098 struct ocfs2_xattr_search
*xs
)
1100 struct ocfs2_xattr_block
*xb
;
1101 struct ocfs2_xattr_value_root
*xv
;
1103 int ret
= -ENODATA
, name_offset
, name_len
, i
;
1104 int uninitialized_var(block_off
);
1106 xs
->bucket
= ocfs2_xattr_bucket_new(inode
);
1113 ret
= ocfs2_xattr_block_find(inode
, name_index
, name
, xs
);
1119 if (xs
->not_found
) {
1124 xb
= (struct ocfs2_xattr_block
*)xs
->xattr_bh
->b_data
;
1125 size
= le64_to_cpu(xs
->here
->xe_value_size
);
1128 if (size
> buffer_size
)
1131 name_offset
= le16_to_cpu(xs
->here
->xe_name_offset
);
1132 name_len
= OCFS2_XATTR_SIZE(xs
->here
->xe_name_len
);
1133 i
= xs
->here
- xs
->header
->xh_entries
;
1135 if (le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
) {
1136 ret
= ocfs2_xattr_bucket_get_name_value(inode
->i_sb
,
1137 bucket_xh(xs
->bucket
),
1141 xs
->base
= bucket_block(xs
->bucket
, block_off
);
1143 if (ocfs2_xattr_is_local(xs
->here
)) {
1144 memcpy(buffer
, (void *)xs
->base
+
1145 name_offset
+ name_len
, size
);
1147 xv
= (struct ocfs2_xattr_value_root
*)
1148 (xs
->base
+ name_offset
+ name_len
);
1149 ret
= ocfs2_xattr_get_value_outside(inode
, xv
,
1159 ocfs2_xattr_bucket_free(xs
->bucket
);
1161 brelse(xs
->xattr_bh
);
1162 xs
->xattr_bh
= NULL
;
1166 int ocfs2_xattr_get_nolock(struct inode
*inode
,
1167 struct buffer_head
*di_bh
,
1174 struct ocfs2_dinode
*di
= NULL
;
1175 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
1176 struct ocfs2_xattr_search xis
= {
1177 .not_found
= -ENODATA
,
1179 struct ocfs2_xattr_search xbs
= {
1180 .not_found
= -ENODATA
,
1183 if (!ocfs2_supports_xattr(OCFS2_SB(inode
->i_sb
)))
1186 if (!(oi
->ip_dyn_features
& OCFS2_HAS_XATTR_FL
))
1189 xis
.inode_bh
= xbs
.inode_bh
= di_bh
;
1190 di
= (struct ocfs2_dinode
*)di_bh
->b_data
;
1192 down_read(&oi
->ip_xattr_sem
);
1193 ret
= ocfs2_xattr_ibody_get(inode
, name_index
, name
, buffer
,
1195 if (ret
== -ENODATA
&& di
->i_xattr_loc
)
1196 ret
= ocfs2_xattr_block_get(inode
, name_index
, name
, buffer
,
1198 up_read(&oi
->ip_xattr_sem
);
/* ocfs2_xattr_get()
 *
 * Copy an extended attribute into the buffer provided.
 * Buffer is NULL to compute the size of buffer required.
 */
1208 static int ocfs2_xattr_get(struct inode
*inode
,
1215 struct buffer_head
*di_bh
= NULL
;
1217 ret
= ocfs2_inode_lock(inode
, &di_bh
, 0);
1222 ret
= ocfs2_xattr_get_nolock(inode
, di_bh
, name_index
,
1223 name
, buffer
, buffer_size
);
1225 ocfs2_inode_unlock(inode
, 0);
1232 static int __ocfs2_xattr_set_value_outside(struct inode
*inode
,
1234 struct ocfs2_xattr_value_buf
*vb
,
1238 int ret
= 0, i
, cp_len
;
1239 u16 blocksize
= inode
->i_sb
->s_blocksize
;
1240 u32 p_cluster
, num_clusters
;
1241 u32 cpos
= 0, bpc
= ocfs2_clusters_to_blocks(inode
->i_sb
, 1);
1242 u32 clusters
= ocfs2_clusters_for_bytes(inode
->i_sb
, value_len
);
1244 struct buffer_head
*bh
= NULL
;
1245 unsigned int ext_flags
;
1246 struct ocfs2_xattr_value_root
*xv
= vb
->vb_xv
;
1248 BUG_ON(clusters
> le32_to_cpu(xv
->xr_clusters
));
1250 while (cpos
< clusters
) {
1251 ret
= ocfs2_xattr_get_clusters(inode
, cpos
, &p_cluster
,
1252 &num_clusters
, &xv
->xr_list
,
1259 BUG_ON(ext_flags
& OCFS2_EXT_REFCOUNTED
);
1261 blkno
= ocfs2_clusters_to_blocks(inode
->i_sb
, p_cluster
);
1263 for (i
= 0; i
< num_clusters
* bpc
; i
++, blkno
++) {
1264 ret
= ocfs2_read_block(INODE_CACHE(inode
), blkno
,
1271 ret
= ocfs2_journal_access(handle
,
1274 OCFS2_JOURNAL_ACCESS_WRITE
);
1280 cp_len
= value_len
> blocksize
? blocksize
: value_len
;
1281 memcpy(bh
->b_data
, value
, cp_len
);
1282 value_len
-= cp_len
;
1284 if (cp_len
< blocksize
)
1285 memset(bh
->b_data
+ cp_len
, 0,
1286 blocksize
- cp_len
);
1288 ret
= ocfs2_journal_dirty(handle
, bh
);
1297 * XXX: do we need to empty all the following
1298 * blocks in this cluster?
1303 cpos
+= num_clusters
;
1311 static int ocfs2_xattr_cleanup(struct inode
*inode
,
1313 struct ocfs2_xattr_info
*xi
,
1314 struct ocfs2_xattr_search
*xs
,
1315 struct ocfs2_xattr_value_buf
*vb
,
1319 size_t name_len
= strlen(xi
->name
);
1320 void *val
= xs
->base
+ offs
;
1321 size_t size
= OCFS2_XATTR_SIZE(name_len
) + OCFS2_XATTR_ROOT_SIZE
;
1323 ret
= vb
->vb_access(handle
, INODE_CACHE(inode
), vb
->vb_bh
,
1324 OCFS2_JOURNAL_ACCESS_WRITE
);
1329 /* Decrease xattr count */
1330 le16_add_cpu(&xs
->header
->xh_count
, -1);
1331 /* Remove the xattr entry and tree root which has already be set*/
1332 memset((void *)xs
->here
, 0, sizeof(struct ocfs2_xattr_entry
));
1333 memset(val
, 0, size
);
1335 ret
= ocfs2_journal_dirty(handle
, vb
->vb_bh
);
1342 static int ocfs2_xattr_update_entry(struct inode
*inode
,
1344 struct ocfs2_xattr_info
*xi
,
1345 struct ocfs2_xattr_search
*xs
,
1346 struct ocfs2_xattr_value_buf
*vb
,
1351 ret
= vb
->vb_access(handle
, INODE_CACHE(inode
), vb
->vb_bh
,
1352 OCFS2_JOURNAL_ACCESS_WRITE
);
1358 xs
->here
->xe_name_offset
= cpu_to_le16(offs
);
1359 xs
->here
->xe_value_size
= cpu_to_le64(xi
->value_len
);
1360 if (xi
->value_len
<= OCFS2_XATTR_INLINE_SIZE
)
1361 ocfs2_xattr_set_local(xs
->here
, 1);
1363 ocfs2_xattr_set_local(xs
->here
, 0);
1364 ocfs2_xattr_hash_entry(inode
, xs
->header
, xs
->here
);
1366 ret
= ocfs2_journal_dirty(handle
, vb
->vb_bh
);
/*
 * ocfs2_xattr_set_value_outside()
 *
 * Set a large value in the external B-tree.
 */
1378 static int ocfs2_xattr_set_value_outside(struct inode
*inode
,
1379 struct ocfs2_xattr_info
*xi
,
1380 struct ocfs2_xattr_search
*xs
,
1381 struct ocfs2_xattr_set_ctxt
*ctxt
,
1382 struct ocfs2_xattr_value_buf
*vb
,
1385 size_t name_len
= strlen(xi
->name
);
1386 void *val
= xs
->base
+ offs
;
1387 struct ocfs2_xattr_value_root
*xv
= NULL
;
1388 size_t size
= OCFS2_XATTR_SIZE(name_len
) + OCFS2_XATTR_ROOT_SIZE
;
1391 memset(val
, 0, size
);
1392 memcpy(val
, xi
->name
, name_len
);
1393 xv
= (struct ocfs2_xattr_value_root
*)
1394 (val
+ OCFS2_XATTR_SIZE(name_len
));
1395 xv
->xr_clusters
= 0;
1396 xv
->xr_last_eb_blk
= 0;
1397 xv
->xr_list
.l_tree_depth
= 0;
1398 xv
->xr_list
.l_count
= cpu_to_le16(1);
1399 xv
->xr_list
.l_next_free_rec
= 0;
1402 ret
= ocfs2_xattr_value_truncate(inode
, vb
, xi
->value_len
, ctxt
);
1407 ret
= ocfs2_xattr_update_entry(inode
, ctxt
->handle
, xi
, xs
, vb
, offs
);
1412 ret
= __ocfs2_xattr_set_value_outside(inode
, ctxt
->handle
, vb
,
1413 xi
->value
, xi
->value_len
);
/*
 * ocfs2_xattr_set_entry_local()
 *
 * Set, replace or remove an extended attribute in the local (in-inode
 * or in-block) storage.
 */
1425 static void ocfs2_xattr_set_entry_local(struct inode
*inode
,
1426 struct ocfs2_xattr_info
*xi
,
1427 struct ocfs2_xattr_search
*xs
,
1428 struct ocfs2_xattr_entry
*last
,
1431 size_t name_len
= strlen(xi
->name
);
1434 if (xi
->value
&& xs
->not_found
) {
1435 /* Insert the new xattr entry. */
1436 le16_add_cpu(&xs
->header
->xh_count
, 1);
1437 ocfs2_xattr_set_type(last
, xi
->name_index
);
1438 ocfs2_xattr_set_local(last
, 1);
1439 last
->xe_name_len
= name_len
;
1445 first_val
= xs
->base
+ min_offs
;
1446 offs
= le16_to_cpu(xs
->here
->xe_name_offset
);
1447 val
= xs
->base
+ offs
;
1449 if (le64_to_cpu(xs
->here
->xe_value_size
) >
1450 OCFS2_XATTR_INLINE_SIZE
)
1451 size
= OCFS2_XATTR_SIZE(name_len
) +
1452 OCFS2_XATTR_ROOT_SIZE
;
1454 size
= OCFS2_XATTR_SIZE(name_len
) +
1455 OCFS2_XATTR_SIZE(le64_to_cpu(xs
->here
->xe_value_size
));
1457 if (xi
->value
&& size
== OCFS2_XATTR_SIZE(name_len
) +
1458 OCFS2_XATTR_SIZE(xi
->value_len
)) {
1459 /* The old and the new value have the
1460 same size. Just replace the value. */
1461 ocfs2_xattr_set_local(xs
->here
, 1);
1462 xs
->here
->xe_value_size
= cpu_to_le64(xi
->value_len
);
1463 /* Clear value bytes. */
1464 memset(val
+ OCFS2_XATTR_SIZE(name_len
),
1466 OCFS2_XATTR_SIZE(xi
->value_len
));
1467 memcpy(val
+ OCFS2_XATTR_SIZE(name_len
),
1472 /* Remove the old name+value. */
1473 memmove(first_val
+ size
, first_val
, val
- first_val
);
1474 memset(first_val
, 0, size
);
1475 xs
->here
->xe_name_hash
= 0;
1476 xs
->here
->xe_name_offset
= 0;
1477 ocfs2_xattr_set_local(xs
->here
, 1);
1478 xs
->here
->xe_value_size
= 0;
1482 /* Adjust all value offsets. */
1483 last
= xs
->header
->xh_entries
;
1484 for (i
= 0 ; i
< le16_to_cpu(xs
->header
->xh_count
); i
++) {
1485 size_t o
= le16_to_cpu(last
->xe_name_offset
);
1488 last
->xe_name_offset
= cpu_to_le16(o
+ size
);
1493 /* Remove the old entry. */
1495 memmove(xs
->here
, xs
->here
+ 1,
1496 (void *)last
- (void *)xs
->here
);
1497 memset(last
, 0, sizeof(struct ocfs2_xattr_entry
));
1498 le16_add_cpu(&xs
->header
->xh_count
, -1);
		/* Insert the new name+value. */
		size_t size = OCFS2_XATTR_SIZE(name_len) +
			      OCFS2_XATTR_SIZE(xi->value_len);
		void *val = xs->base + min_offs - size;

		xs->here->xe_name_offset = cpu_to_le16(min_offs - size);
		memset(val, 0, size);
		memcpy(val, xi->name, name_len);
		memcpy(val + OCFS2_XATTR_SIZE(name_len),
		xs->here->xe_value_size = cpu_to_le64(xi->value_len);
		ocfs2_xattr_set_local(xs->here, 1);
		ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
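
/*
 * Layout note for the local-storage code above: struct
 * ocfs2_xattr_entry records grow forward from the ocfs2_xattr_header,
 * while the packed name+value blobs they point at (via xe_name_offset)
 * are allocated from the end of the area backwards; min_offs tracks the
 * lowest blob offset, so a new name+value lands at
 * xs->base + min_offs - size, and removing one shifts the blobs stored
 * below it up by `size' and increases their xe_name_offset accordingly.
 */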
/*
 * ocfs2_xattr_set_entry()
 *
 * Set an extended attribute entry in the inode or in an external block.
 *
 * If the extended attribute value size is > OCFS2_XATTR_INLINE_SIZE,
 * we first insert the tree root (ocfs2_xattr_value_root) with
 * set_entry_local(), then set the value in the B-tree with
 * set_value_outside().
 */
1530 static int ocfs2_xattr_set_entry(struct inode
*inode
,
1531 struct ocfs2_xattr_info
*xi
,
1532 struct ocfs2_xattr_search
*xs
,
1533 struct ocfs2_xattr_set_ctxt
*ctxt
,
1536 struct ocfs2_xattr_entry
*last
;
1537 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
1538 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)xs
->inode_bh
->b_data
;
1539 size_t min_offs
= xs
->end
- xs
->base
, name_len
= strlen(xi
->name
);
1541 handle_t
*handle
= ctxt
->handle
;
1543 struct ocfs2_xattr_info xi_l
= {
1544 .name_index
= xi
->name_index
,
1547 .value_len
= xi
->value_len
,
1549 struct ocfs2_xattr_value_buf vb
= {
1550 .vb_bh
= xs
->xattr_bh
,
1551 .vb_access
= ocfs2_journal_access_di
,
1554 if (!(flag
& OCFS2_INLINE_XATTR_FL
)) {
1555 BUG_ON(xs
->xattr_bh
== xs
->inode_bh
);
1556 vb
.vb_access
= ocfs2_journal_access_xb
;
1558 BUG_ON(xs
->xattr_bh
!= xs
->inode_bh
);
1560 /* Compute min_offs, last and free space. */
1561 last
= xs
->header
->xh_entries
;
1563 for (i
= 0 ; i
< le16_to_cpu(xs
->header
->xh_count
); i
++) {
1564 size_t offs
= le16_to_cpu(last
->xe_name_offset
);
1565 if (offs
< min_offs
)
1570 free
= min_offs
- ((void *)last
- xs
->base
) - OCFS2_XATTR_HEADER_GAP
;
1574 if (!xs
->not_found
) {
1576 if (ocfs2_xattr_is_local(xs
->here
))
1577 size
= OCFS2_XATTR_SIZE(name_len
) +
1578 OCFS2_XATTR_SIZE(le64_to_cpu(xs
->here
->xe_value_size
));
1580 size
= OCFS2_XATTR_SIZE(name_len
) +
1581 OCFS2_XATTR_ROOT_SIZE
;
1582 free
+= (size
+ sizeof(struct ocfs2_xattr_entry
));
1584 /* Check free space in inode or block */
1585 if (xi
->value
&& xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
) {
1586 if (free
< sizeof(struct ocfs2_xattr_entry
) +
1587 OCFS2_XATTR_SIZE(name_len
) +
1588 OCFS2_XATTR_ROOT_SIZE
) {
1592 size_l
= OCFS2_XATTR_SIZE(name_len
) + OCFS2_XATTR_ROOT_SIZE
;
1593 xi_l
.value
= (void *)&def_xv
;
1594 xi_l
.value_len
= OCFS2_XATTR_ROOT_SIZE
;
1595 } else if (xi
->value
) {
1596 if (free
< sizeof(struct ocfs2_xattr_entry
) +
1597 OCFS2_XATTR_SIZE(name_len
) +
1598 OCFS2_XATTR_SIZE(xi
->value_len
)) {
1604 if (!xs
->not_found
) {
1605 /* For existing extended attribute */
1606 size_t size
= OCFS2_XATTR_SIZE(name_len
) +
1607 OCFS2_XATTR_SIZE(le64_to_cpu(xs
->here
->xe_value_size
));
1608 size_t offs
= le16_to_cpu(xs
->here
->xe_name_offset
);
1609 void *val
= xs
->base
+ offs
;
1611 if (ocfs2_xattr_is_local(xs
->here
) && size
== size_l
) {
1612 /* Replace existing local xattr with tree root */
1613 ret
= ocfs2_xattr_set_value_outside(inode
, xi
, xs
,
1618 } else if (!ocfs2_xattr_is_local(xs
->here
)) {
1619 /* For existing xattr which has value outside */
1620 vb
.vb_xv
= (struct ocfs2_xattr_value_root
*)
1621 (val
+ OCFS2_XATTR_SIZE(name_len
));
1623 if (xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
) {
				 * If the new value also needs to be set outside,
				 * first truncate the old value to the new size,
				 * then set the new value with set_value_outside().
1629 ret
= ocfs2_xattr_value_truncate(inode
,
1638 ret
= ocfs2_xattr_update_entry(inode
,
1649 ret
= __ocfs2_xattr_set_value_outside(inode
,
				 * If the new value needs to be set locally,
				 * just truncate the old value to zero.
1662 ret
= ocfs2_xattr_value_truncate(inode
,
1672 ret
= ocfs2_journal_access_di(handle
, INODE_CACHE(inode
), xs
->inode_bh
,
1673 OCFS2_JOURNAL_ACCESS_WRITE
);
1679 if (!(flag
& OCFS2_INLINE_XATTR_FL
)) {
1680 ret
= vb
.vb_access(handle
, INODE_CACHE(inode
), vb
.vb_bh
,
1681 OCFS2_JOURNAL_ACCESS_WRITE
);
1689 * Set value in local, include set tree root in local.
1690 * This is the first step for value size >INLINE_SIZE.
1692 ocfs2_xattr_set_entry_local(inode
, &xi_l
, xs
, last
, min_offs
);
1694 if (!(flag
& OCFS2_INLINE_XATTR_FL
)) {
1695 ret
= ocfs2_journal_dirty(handle
, xs
->xattr_bh
);
1702 if (!(oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
) &&
1703 (flag
& OCFS2_INLINE_XATTR_FL
)) {
1704 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
1705 unsigned int xattrsize
= osb
->s_xattr_inline_size
;
1708 * Adjust extent record count or inline data size
1709 * to reserve space for extended attribute.
1711 if (oi
->ip_dyn_features
& OCFS2_INLINE_DATA_FL
) {
1712 struct ocfs2_inline_data
*idata
= &di
->id2
.i_data
;
1713 le16_add_cpu(&idata
->id_count
, -xattrsize
);
1714 } else if (!(ocfs2_inode_is_fast_symlink(inode
))) {
1715 struct ocfs2_extent_list
*el
= &di
->id2
.i_list
;
1716 le16_add_cpu(&el
->l_count
, -(xattrsize
/
1717 sizeof(struct ocfs2_extent_rec
)));
1719 di
->i_xattr_inline_size
= cpu_to_le16(xattrsize
);
1721 /* Update xattr flag */
1722 spin_lock(&oi
->ip_lock
);
1723 oi
->ip_dyn_features
|= flag
;
1724 di
->i_dyn_features
= cpu_to_le16(oi
->ip_dyn_features
);
1725 spin_unlock(&oi
->ip_lock
);
1727 ret
= ocfs2_journal_dirty(handle
, xs
->inode_bh
);
1731 if (!ret
&& xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
) {
1733 * Set value outside in B tree.
1734 * This is the second step for value size > INLINE_SIZE.
1736 size_t offs
= le16_to_cpu(xs
->here
->xe_name_offset
);
1737 ret
= ocfs2_xattr_set_value_outside(inode
, xi
, xs
, ctxt
,
1744 * If set value outside failed, we have to clean
1745 * the junk tree root we have already set in local.
1747 ret2
= ocfs2_xattr_cleanup(inode
, ctxt
->handle
,
/*
 * In xattr removal, if the value is stored outside and refcounted, we may
 * have to split the refcount tree, so we need the allocators.
 */
1761 static int ocfs2_lock_xattr_remove_allocators(struct inode
*inode
,
1762 struct ocfs2_xattr_value_root
*xv
,
1763 struct ocfs2_caching_info
*ref_ci
,
1764 struct buffer_head
*ref_root_bh
,
1765 struct ocfs2_alloc_context
**meta_ac
,
1768 int ret
, meta_add
= 0;
1769 u32 p_cluster
, num_clusters
;
1770 unsigned int ext_flags
;
1773 ret
= ocfs2_xattr_get_clusters(inode
, 0, &p_cluster
,
1782 if (!(ext_flags
& OCFS2_EXT_REFCOUNTED
))
1785 ret
= ocfs2_refcounted_xattr_delete_need(inode
, ref_ci
,
1787 &meta_add
, ref_credits
);
1793 ret
= ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode
->i_sb
),
1802 static int ocfs2_remove_value_outside(struct inode
*inode
,
1803 struct ocfs2_xattr_value_buf
*vb
,
1804 struct ocfs2_xattr_header
*header
,
1805 struct ocfs2_caching_info
*ref_ci
,
1806 struct buffer_head
*ref_root_bh
)
1808 int ret
= 0, i
, ref_credits
;
1809 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
1810 struct ocfs2_xattr_set_ctxt ctxt
= { NULL
, NULL
, };
1813 ocfs2_init_dealloc_ctxt(&ctxt
.dealloc
);
1815 for (i
= 0; i
< le16_to_cpu(header
->xh_count
); i
++) {
1816 struct ocfs2_xattr_entry
*entry
= &header
->xh_entries
[i
];
1818 if (ocfs2_xattr_is_local(entry
))
1821 val
= (void *)header
+
1822 le16_to_cpu(entry
->xe_name_offset
);
1823 vb
->vb_xv
= (struct ocfs2_xattr_value_root
*)
1824 (val
+ OCFS2_XATTR_SIZE(entry
->xe_name_len
));
1826 ret
= ocfs2_lock_xattr_remove_allocators(inode
, vb
->vb_xv
,
1827 ref_ci
, ref_root_bh
,
1831 ctxt
.handle
= ocfs2_start_trans(osb
, ref_credits
+
1832 ocfs2_remove_extent_credits(osb
->sb
));
1833 if (IS_ERR(ctxt
.handle
)) {
1834 ret
= PTR_ERR(ctxt
.handle
);
1839 ret
= ocfs2_xattr_value_truncate(inode
, vb
, 0, &ctxt
);
1845 ocfs2_commit_trans(osb
, ctxt
.handle
);
1847 ocfs2_free_alloc_context(ctxt
.meta_ac
);
1848 ctxt
.meta_ac
= NULL
;
1853 ocfs2_free_alloc_context(ctxt
.meta_ac
);
1854 ocfs2_schedule_truncate_log_flush(osb
, 1);
1855 ocfs2_run_deallocs(osb
, &ctxt
.dealloc
);
1859 static int ocfs2_xattr_ibody_remove(struct inode
*inode
,
1860 struct buffer_head
*di_bh
,
1861 struct ocfs2_caching_info
*ref_ci
,
1862 struct buffer_head
*ref_root_bh
)
1865 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)di_bh
->b_data
;
1866 struct ocfs2_xattr_header
*header
;
1868 struct ocfs2_xattr_value_buf vb
= {
1870 .vb_access
= ocfs2_journal_access_di
,
1873 header
= (struct ocfs2_xattr_header
*)
1874 ((void *)di
+ inode
->i_sb
->s_blocksize
-
1875 le16_to_cpu(di
->i_xattr_inline_size
));
1877 ret
= ocfs2_remove_value_outside(inode
, &vb
, header
,
1878 ref_ci
, ref_root_bh
);
1883 struct ocfs2_rm_xattr_bucket_para
{
1884 struct ocfs2_caching_info
*ref_ci
;
1885 struct buffer_head
*ref_root_bh
;
1888 static int ocfs2_xattr_block_remove(struct inode
*inode
,
1889 struct buffer_head
*blk_bh
,
1890 struct ocfs2_caching_info
*ref_ci
,
1891 struct buffer_head
*ref_root_bh
)
1893 struct ocfs2_xattr_block
*xb
;
1895 struct ocfs2_xattr_value_buf vb
= {
1897 .vb_access
= ocfs2_journal_access_xb
,
1899 struct ocfs2_rm_xattr_bucket_para args
= {
1901 .ref_root_bh
= ref_root_bh
,
1904 xb
= (struct ocfs2_xattr_block
*)blk_bh
->b_data
;
1905 if (!(le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
)) {
1906 struct ocfs2_xattr_header
*header
= &(xb
->xb_attrs
.xb_header
);
1907 ret
= ocfs2_remove_value_outside(inode
, &vb
, header
,
1908 ref_ci
, ref_root_bh
);
1910 ret
= ocfs2_iterate_xattr_index_block(inode
,
1912 ocfs2_rm_xattr_cluster
,
1918 static int ocfs2_xattr_free_block(struct inode
*inode
,
1920 struct ocfs2_caching_info
*ref_ci
,
1921 struct buffer_head
*ref_root_bh
)
1923 struct inode
*xb_alloc_inode
;
1924 struct buffer_head
*xb_alloc_bh
= NULL
;
1925 struct buffer_head
*blk_bh
= NULL
;
1926 struct ocfs2_xattr_block
*xb
;
1927 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
1933 ret
= ocfs2_read_xattr_block(inode
, block
, &blk_bh
);
1939 ret
= ocfs2_xattr_block_remove(inode
, blk_bh
, ref_ci
, ref_root_bh
);
1945 xb
= (struct ocfs2_xattr_block
*)blk_bh
->b_data
;
1946 blk
= le64_to_cpu(xb
->xb_blkno
);
1947 bit
= le16_to_cpu(xb
->xb_suballoc_bit
);
1948 bg_blkno
= ocfs2_which_suballoc_group(blk
, bit
);
1950 xb_alloc_inode
= ocfs2_get_system_file_inode(osb
,
1951 EXTENT_ALLOC_SYSTEM_INODE
,
1952 le16_to_cpu(xb
->xb_suballoc_slot
));
1953 if (!xb_alloc_inode
) {
1958 mutex_lock(&xb_alloc_inode
->i_mutex
);
1960 ret
= ocfs2_inode_lock(xb_alloc_inode
, &xb_alloc_bh
, 1);
1966 handle
= ocfs2_start_trans(osb
, OCFS2_SUBALLOC_FREE
);
1967 if (IS_ERR(handle
)) {
1968 ret
= PTR_ERR(handle
);
1973 ret
= ocfs2_free_suballoc_bits(handle
, xb_alloc_inode
, xb_alloc_bh
,
1978 ocfs2_commit_trans(osb
, handle
);
1980 ocfs2_inode_unlock(xb_alloc_inode
, 1);
1981 brelse(xb_alloc_bh
);
1983 mutex_unlock(&xb_alloc_inode
->i_mutex
);
1984 iput(xb_alloc_inode
);
/*
 * ocfs2_xattr_remove()
 *
 * Free extended attribute resources associated with this inode.
 */
1995 int ocfs2_xattr_remove(struct inode
*inode
, struct buffer_head
*di_bh
)
1997 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
1998 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)di_bh
->b_data
;
1999 struct ocfs2_refcount_tree
*ref_tree
= NULL
;
2000 struct buffer_head
*ref_root_bh
= NULL
;
2001 struct ocfs2_caching_info
*ref_ci
= NULL
;
2005 if (!ocfs2_supports_xattr(OCFS2_SB(inode
->i_sb
)))
2008 if (!(oi
->ip_dyn_features
& OCFS2_HAS_XATTR_FL
))
2011 if (OCFS2_I(inode
)->ip_dyn_features
& OCFS2_HAS_REFCOUNT_FL
) {
2012 ret
= ocfs2_lock_refcount_tree(OCFS2_SB(inode
->i_sb
),
2013 le64_to_cpu(di
->i_refcount_loc
),
2014 1, &ref_tree
, &ref_root_bh
);
2019 ref_ci
= &ref_tree
->rf_ci
;
2023 if (oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
) {
2024 ret
= ocfs2_xattr_ibody_remove(inode
, di_bh
,
2025 ref_ci
, ref_root_bh
);
2032 if (di
->i_xattr_loc
) {
2033 ret
= ocfs2_xattr_free_block(inode
,
2034 le64_to_cpu(di
->i_xattr_loc
),
2035 ref_ci
, ref_root_bh
);
2042 handle
= ocfs2_start_trans((OCFS2_SB(inode
->i_sb
)),
2043 OCFS2_INODE_UPDATE_CREDITS
);
2044 if (IS_ERR(handle
)) {
2045 ret
= PTR_ERR(handle
);
2049 ret
= ocfs2_journal_access_di(handle
, INODE_CACHE(inode
), di_bh
,
2050 OCFS2_JOURNAL_ACCESS_WRITE
);
2056 di
->i_xattr_loc
= 0;
2058 spin_lock(&oi
->ip_lock
);
2059 oi
->ip_dyn_features
&= ~(OCFS2_INLINE_XATTR_FL
| OCFS2_HAS_XATTR_FL
);
2060 di
->i_dyn_features
= cpu_to_le16(oi
->ip_dyn_features
);
2061 spin_unlock(&oi
->ip_lock
);
2063 ret
= ocfs2_journal_dirty(handle
, di_bh
);
2067 ocfs2_commit_trans(OCFS2_SB(inode
->i_sb
), handle
);
2070 ocfs2_unlock_refcount_tree(OCFS2_SB(inode
->i_sb
), ref_tree
, 1);
2071 brelse(ref_root_bh
);
2075 static int ocfs2_xattr_has_space_inline(struct inode
*inode
,
2076 struct ocfs2_dinode
*di
)
2078 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
2079 unsigned int xattrsize
= OCFS2_SB(inode
->i_sb
)->s_xattr_inline_size
;
2082 if (xattrsize
< OCFS2_MIN_XATTR_INLINE_SIZE
)
2085 if (oi
->ip_dyn_features
& OCFS2_INLINE_DATA_FL
) {
2086 struct ocfs2_inline_data
*idata
= &di
->id2
.i_data
;
2087 free
= le16_to_cpu(idata
->id_count
) - le64_to_cpu(di
->i_size
);
2088 } else if (ocfs2_inode_is_fast_symlink(inode
)) {
2089 free
= ocfs2_fast_symlink_chars(inode
->i_sb
) -
2090 le64_to_cpu(di
->i_size
);
2092 struct ocfs2_extent_list
*el
= &di
->id2
.i_list
;
2093 free
= (le16_to_cpu(el
->l_count
) -
2094 le16_to_cpu(el
->l_next_free_rec
)) *
2095 sizeof(struct ocfs2_extent_rec
);
2097 if (free
>= xattrsize
)
/*
 * ocfs2_xattr_ibody_find()
 *
 * Find extended attribute in inode block and
 * fill search info into struct ocfs2_xattr_search.
 */
2109 static int ocfs2_xattr_ibody_find(struct inode
*inode
,
2112 struct ocfs2_xattr_search
*xs
)
2114 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
2115 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)xs
->inode_bh
->b_data
;
2119 if (inode
->i_sb
->s_blocksize
== OCFS2_MIN_BLOCKSIZE
)
2122 if (!(oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
)) {
2123 down_read(&oi
->ip_alloc_sem
);
2124 has_space
= ocfs2_xattr_has_space_inline(inode
, di
);
2125 up_read(&oi
->ip_alloc_sem
);
2130 xs
->xattr_bh
= xs
->inode_bh
;
2131 xs
->end
= (void *)di
+ inode
->i_sb
->s_blocksize
;
2132 if (oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
)
2133 xs
->header
= (struct ocfs2_xattr_header
*)
2134 (xs
->end
- le16_to_cpu(di
->i_xattr_inline_size
));
2136 xs
->header
= (struct ocfs2_xattr_header
*)
2137 (xs
->end
- OCFS2_SB(inode
->i_sb
)->s_xattr_inline_size
);
2138 xs
->base
= (void *)xs
->header
;
2139 xs
->here
= xs
->header
->xh_entries
;
2141 /* Find the named attribute. */
2142 if (oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
) {
2143 ret
= ocfs2_xattr_find_entry(name_index
, name
, xs
);
2144 if (ret
&& ret
!= -ENODATA
)
2146 xs
->not_found
= ret
;
/*
 * ocfs2_xattr_ibody_set()
 *
 * Set, replace or remove an extended attribute in the inode block.
 */
2158 static int ocfs2_xattr_ibody_set(struct inode
*inode
,
2159 struct ocfs2_xattr_info
*xi
,
2160 struct ocfs2_xattr_search
*xs
,
2161 struct ocfs2_xattr_set_ctxt
*ctxt
)
2163 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
2164 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)xs
->inode_bh
->b_data
;
2167 if (inode
->i_sb
->s_blocksize
== OCFS2_MIN_BLOCKSIZE
)
2170 down_write(&oi
->ip_alloc_sem
);
2171 if (!(oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
)) {
2172 if (!ocfs2_xattr_has_space_inline(inode
, di
)) {
2178 ret
= ocfs2_xattr_set_entry(inode
, xi
, xs
, ctxt
,
2179 (OCFS2_INLINE_XATTR_FL
| OCFS2_HAS_XATTR_FL
));
2181 up_write(&oi
->ip_alloc_sem
);
/*
 * ocfs2_xattr_block_find()
 *
 * Find extended attribute in external block and
 * fill search info into struct ocfs2_xattr_search.
 */
2192 static int ocfs2_xattr_block_find(struct inode
*inode
,
2195 struct ocfs2_xattr_search
*xs
)
2197 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)xs
->inode_bh
->b_data
;
2198 struct buffer_head
*blk_bh
= NULL
;
2199 struct ocfs2_xattr_block
*xb
;
2202 if (!di
->i_xattr_loc
)
2205 ret
= ocfs2_read_xattr_block(inode
, le64_to_cpu(di
->i_xattr_loc
),
2212 xs
->xattr_bh
= blk_bh
;
2213 xb
= (struct ocfs2_xattr_block
*)blk_bh
->b_data
;
2215 if (!(le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
)) {
2216 xs
->header
= &xb
->xb_attrs
.xb_header
;
2217 xs
->base
= (void *)xs
->header
;
2218 xs
->end
= (void *)(blk_bh
->b_data
) + blk_bh
->b_size
;
2219 xs
->here
= xs
->header
->xh_entries
;
2221 ret
= ocfs2_xattr_find_entry(name_index
, name
, xs
);
2223 ret
= ocfs2_xattr_index_block_find(inode
, blk_bh
,
2227 if (ret
&& ret
!= -ENODATA
) {
2228 xs
->xattr_bh
= NULL
;
2231 xs
->not_found
= ret
;
2239 static int ocfs2_create_xattr_block(handle_t
*handle
,
2240 struct inode
*inode
,
2241 struct buffer_head
*inode_bh
,
2242 struct ocfs2_alloc_context
*meta_ac
,
2243 struct buffer_head
**ret_bh
,
2247 u16 suballoc_bit_start
;
2250 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)inode_bh
->b_data
;
2251 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
2252 struct buffer_head
*new_bh
= NULL
;
2253 struct ocfs2_xattr_block
*xblk
;
2255 ret
= ocfs2_journal_access_di(handle
, INODE_CACHE(inode
), inode_bh
,
2256 OCFS2_JOURNAL_ACCESS_CREATE
);
2262 ret
= ocfs2_claim_metadata(osb
, handle
, meta_ac
, 1,
2263 &suballoc_bit_start
, &num_got
,
2270 new_bh
= sb_getblk(inode
->i_sb
, first_blkno
);
2271 ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode
), new_bh
);
2273 ret
= ocfs2_journal_access_xb(handle
, INODE_CACHE(inode
),
2275 OCFS2_JOURNAL_ACCESS_CREATE
);
2281 /* Initialize ocfs2_xattr_block */
2282 xblk
= (struct ocfs2_xattr_block
*)new_bh
->b_data
;
2283 memset(xblk
, 0, inode
->i_sb
->s_blocksize
);
2284 strcpy((void *)xblk
, OCFS2_XATTR_BLOCK_SIGNATURE
);
2285 xblk
->xb_suballoc_slot
= cpu_to_le16(osb
->slot_num
);
2286 xblk
->xb_suballoc_bit
= cpu_to_le16(suballoc_bit_start
);
2287 xblk
->xb_fs_generation
= cpu_to_le32(osb
->fs_generation
);
2288 xblk
->xb_blkno
= cpu_to_le64(first_blkno
);
2291 struct ocfs2_xattr_tree_root
*xr
= &xblk
->xb_attrs
.xb_root
;
2292 xr
->xt_clusters
= cpu_to_le32(1);
2293 xr
->xt_last_eb_blk
= 0;
2294 xr
->xt_list
.l_tree_depth
= 0;
2295 xr
->xt_list
.l_count
= cpu_to_le16(
2296 ocfs2_xattr_recs_per_xb(inode
->i_sb
));
2297 xr
->xt_list
.l_next_free_rec
= cpu_to_le16(1);
2298 xblk
->xb_flags
= cpu_to_le16(OCFS2_XATTR_INDEXED
);
2301 ret
= ocfs2_journal_dirty(handle
, new_bh
);
2306 di
->i_xattr_loc
= cpu_to_le64(first_blkno
);
2307 ocfs2_journal_dirty(handle
, inode_bh
);
/*
 * ocfs2_xattr_block_set()
 *
 * Set, replace or remove an extended attribute in an external block.
 */
2323 static int ocfs2_xattr_block_set(struct inode
*inode
,
2324 struct ocfs2_xattr_info
*xi
,
2325 struct ocfs2_xattr_search
*xs
,
2326 struct ocfs2_xattr_set_ctxt
*ctxt
)
2328 struct buffer_head
*new_bh
= NULL
;
2329 handle_t
*handle
= ctxt
->handle
;
2330 struct ocfs2_xattr_block
*xblk
= NULL
;
2333 if (!xs
->xattr_bh
) {
2334 ret
= ocfs2_create_xattr_block(handle
, inode
, xs
->inode_bh
,
2335 ctxt
->meta_ac
, &new_bh
, 0);
2341 xs
->xattr_bh
= new_bh
;
2342 xblk
= (struct ocfs2_xattr_block
*)xs
->xattr_bh
->b_data
;
2343 xs
->header
= &xblk
->xb_attrs
.xb_header
;
2344 xs
->base
= (void *)xs
->header
;
2345 xs
->end
= (void *)xblk
+ inode
->i_sb
->s_blocksize
;
2346 xs
->here
= xs
->header
->xh_entries
;
2348 xblk
= (struct ocfs2_xattr_block
*)xs
->xattr_bh
->b_data
;
2350 if (!(le16_to_cpu(xblk
->xb_flags
) & OCFS2_XATTR_INDEXED
)) {
2351 /* Set extended attribute into external block */
2352 ret
= ocfs2_xattr_set_entry(inode
, xi
, xs
, ctxt
,
2353 OCFS2_HAS_XATTR_FL
);
2354 if (!ret
|| ret
!= -ENOSPC
)
2357 ret
= ocfs2_xattr_create_index_block(inode
, xs
, ctxt
);
2362 ret
= ocfs2_xattr_set_entry_index_block(inode
, xi
, xs
, ctxt
);
2369 /* Check whether the new xattr can be inserted into the inode. */
2370 static int ocfs2_xattr_can_be_in_inode(struct inode
*inode
,
2371 struct ocfs2_xattr_info
*xi
,
2372 struct ocfs2_xattr_search
*xs
)
2375 struct ocfs2_xattr_entry
*last
;
2377 size_t min_offs
= xs
->end
- xs
->base
;
2382 last
= xs
->header
->xh_entries
;
2384 for (i
= 0; i
< le16_to_cpu(xs
->header
->xh_count
); i
++) {
2385 size_t offs
= le16_to_cpu(last
->xe_name_offset
);
2386 if (offs
< min_offs
)
2391 free
= min_offs
- ((void *)last
- xs
->base
) - OCFS2_XATTR_HEADER_GAP
;
2395 BUG_ON(!xs
->not_found
);
2397 if (xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
)
2398 value_size
= OCFS2_XATTR_ROOT_SIZE
;
2400 value_size
= OCFS2_XATTR_SIZE(xi
->value_len
);
2402 if (free
>= sizeof(struct ocfs2_xattr_entry
) +
2403 OCFS2_XATTR_SIZE(strlen(xi
->name
)) + value_size
)
2409 static int ocfs2_calc_xattr_set_need(struct inode
*inode
,
2410 struct ocfs2_dinode
*di
,
2411 struct ocfs2_xattr_info
*xi
,
2412 struct ocfs2_xattr_search
*xis
,
2413 struct ocfs2_xattr_search
*xbs
,
2418 int ret
= 0, old_in_xb
= 0;
2419 int clusters_add
= 0, meta_add
= 0, credits
= 0;
2420 struct buffer_head
*bh
= NULL
;
2421 struct ocfs2_xattr_block
*xb
= NULL
;
2422 struct ocfs2_xattr_entry
*xe
= NULL
;
2423 struct ocfs2_xattr_value_root
*xv
= NULL
;
2425 int name_offset
, name_len
= 0;
2426 u32 new_clusters
= ocfs2_clusters_for_bytes(inode
->i_sb
,
2431 * Calculate the clusters we need to write.
2432 * No matter whether we replace an old one or add a new one,
2433 * we need this for writing.
2435 if (xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
)
2436 credits
+= new_clusters
*
2437 ocfs2_clusters_to_blocks(inode
->i_sb
, 1);
2439 if (xis
->not_found
&& xbs
->not_found
) {
2440 credits
+= ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
2442 if (xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
) {
2443 clusters_add
+= new_clusters
;
2444 credits
+= ocfs2_calc_extend_credits(inode
->i_sb
,
2452 if (!xis
->not_found
) {
2454 name_offset
= le16_to_cpu(xe
->xe_name_offset
);
2455 name_len
= OCFS2_XATTR_SIZE(xe
->xe_name_len
);
2457 credits
+= OCFS2_INODE_UPDATE_CREDITS
;
2459 int i
, block_off
= 0;
2460 xb
= (struct ocfs2_xattr_block
*)xbs
->xattr_bh
->b_data
;
2462 name_offset
= le16_to_cpu(xe
->xe_name_offset
);
2463 name_len
= OCFS2_XATTR_SIZE(xe
->xe_name_len
);
2464 i
= xbs
->here
- xbs
->header
->xh_entries
;
2467 if (le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
) {
2468 ret
= ocfs2_xattr_bucket_get_name_value(inode
->i_sb
,
2469 bucket_xh(xbs
->bucket
),
2472 base
= bucket_block(xbs
->bucket
, block_off
);
2473 credits
+= ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
2476 credits
+= OCFS2_XATTR_BLOCK_UPDATE_CREDITS
;
		/*
		 * Deleting an xattr doesn't need metadata or cluster
		 * allocation, so just calculate the credits and return.
		 *
		 * The credits for removing the value tree will be extended
		 * by ocfs2_remove_extent itself.
		 */
2488 if (!ocfs2_xattr_is_local(xe
))
2489 credits
+= ocfs2_remove_extent_credits(inode
->i_sb
);
2494 /* do cluster allocation guess first. */
2495 value_size
= le64_to_cpu(xe
->xe_value_size
);
		/*
		 * In xattr set, we always try to set the xe in the inode
		 * first, so if it can be inserted into the inode
		 * successfully, the old one will be removed from the xattr
		 * block and this xattr will be inserted into the inode as a
		 * new in-inode xattr.
		 */
2504 if (ocfs2_xattr_can_be_in_inode(inode
, xi
, xis
)) {
2505 clusters_add
+= new_clusters
;
2506 credits
+= ocfs2_remove_extent_credits(inode
->i_sb
) +
2507 OCFS2_INODE_UPDATE_CREDITS
;
2508 if (!ocfs2_xattr_is_local(xe
))
2509 credits
+= ocfs2_calc_extend_credits(
2517 if (xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
) {
2518 /* the new values will be stored outside. */
2519 u32 old_clusters
= 0;
2521 if (!ocfs2_xattr_is_local(xe
)) {
2522 old_clusters
= ocfs2_clusters_for_bytes(inode
->i_sb
,
2524 xv
= (struct ocfs2_xattr_value_root
*)
2525 (base
+ name_offset
+ name_len
);
2526 value_size
= OCFS2_XATTR_ROOT_SIZE
;
2530 if (old_clusters
>= new_clusters
) {
2531 credits
+= ocfs2_remove_extent_credits(inode
->i_sb
);
2534 meta_add
+= ocfs2_extend_meta_needed(&xv
->xr_list
);
2535 clusters_add
+= new_clusters
- old_clusters
;
2536 credits
+= ocfs2_calc_extend_credits(inode
->i_sb
,
2540 if (value_size
>= OCFS2_XATTR_ROOT_SIZE
)
2545 * Now the new value will be stored inside. So if the new
2546 * value is smaller than the size of value root or the old
2547 * value, we don't need any allocation, otherwise we have
2548 * to guess metadata allocation.
2550 if ((ocfs2_xattr_is_local(xe
) && value_size
>= xi
->value_len
) ||
2551 (!ocfs2_xattr_is_local(xe
) &&
2552 OCFS2_XATTR_ROOT_SIZE
>= xi
->value_len
))
2557 /* calculate metadata allocation. */
2558 if (di
->i_xattr_loc
) {
2559 if (!xbs
->xattr_bh
) {
2560 ret
= ocfs2_read_xattr_block(inode
,
2561 le64_to_cpu(di
->i_xattr_loc
),
2568 xb
= (struct ocfs2_xattr_block
*)bh
->b_data
;
2570 xb
= (struct ocfs2_xattr_block
*)xbs
->xattr_bh
->b_data
;
	/*
	 * If there is already an xattr tree, good, we can calculate
	 * credits like other b-trees.  Otherwise we may have the chance
	 * of creating a tree; the credit calculation is borrowed from
	 * ocfs2_calc_extend_credits with root_el = NULL.  And the
	 * new tree will be cluster based, so no meta is needed.
	 */
2579 if (le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
) {
2580 struct ocfs2_extent_list
*el
=
2581 &xb
->xb_attrs
.xb_root
.xt_list
;
2582 meta_add
+= ocfs2_extend_meta_needed(el
);
2583 credits
+= ocfs2_calc_extend_credits(inode
->i_sb
,
2586 credits
+= OCFS2_SUBALLOC_ALLOC
+ 1;
2589 * This cluster will be used either for new bucket or for
2591 * If the cluster size is the same as the bucket size, one
2592 * more is needed since we may need to extend the bucket
2596 credits
+= ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
2597 if (OCFS2_XATTR_BUCKET_SIZE
==
2598 OCFS2_SB(inode
->i_sb
)->s_clustersize
) {
2599 credits
+= ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
2604 credits
+= OCFS2_XATTR_BLOCK_CREATE_CREDITS
;
2608 *clusters_need
= clusters_add
;
2610 *meta_need
= meta_add
;
2612 *credits_need
= credits
;
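/*
 * Illustrative sketch (not part of the original code): the cluster
 * estimate used above ultimately rounds the value length up to whole
 * clusters.  The helper below is a hypothetical, stand-alone version of
 * that rounding, assuming only a cluster-size shift is available.
 */
static inline u32 xattr_value_clusters_sketch(u64 value_len,
					      unsigned int clustersize_bits)
{
	/* Round the byte length up to a whole number of clusters. */
	return (u32)((value_len + (1ULL << clustersize_bits) - 1) >>
		     clustersize_bits);
}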
2617 static int ocfs2_init_xattr_set_ctxt(struct inode
*inode
,
2618 struct ocfs2_dinode
*di
,
2619 struct ocfs2_xattr_info
*xi
,
2620 struct ocfs2_xattr_search
*xis
,
2621 struct ocfs2_xattr_search
*xbs
,
2622 struct ocfs2_xattr_set_ctxt
*ctxt
,
2626 int clusters_add
, meta_add
, ret
;
2627 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
2629 memset(ctxt
, 0, sizeof(struct ocfs2_xattr_set_ctxt
));
2631 ocfs2_init_dealloc_ctxt(&ctxt
->dealloc
);
2633 ret
= ocfs2_calc_xattr_set_need(inode
, di
, xi
, xis
, xbs
,
2634 &clusters_add
, &meta_add
, credits
);
2640 meta_add
+= extra_meta
;
2641 mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
2642 "credits = %d\n", xi
->name
, meta_add
, clusters_add
, *credits
);
2645 ret
= ocfs2_reserve_new_metadata_blocks(osb
, meta_add
,
2654 ret
= ocfs2_reserve_clusters(osb
, clusters_add
, &ctxt
->data_ac
);
2660 if (ctxt
->meta_ac
) {
2661 ocfs2_free_alloc_context(ctxt
->meta_ac
);
2662 ctxt
->meta_ac
= NULL
;
	/*
	 * We cannot have an error and a non-NULL ctxt->data_ac.
	 */
2673 static int __ocfs2_xattr_set_handle(struct inode
*inode
,
2674 struct ocfs2_dinode
*di
,
2675 struct ocfs2_xattr_info
*xi
,
2676 struct ocfs2_xattr_search
*xis
,
2677 struct ocfs2_xattr_search
*xbs
,
2678 struct ocfs2_xattr_set_ctxt
*ctxt
)
2680 int ret
= 0, credits
, old_found
;
2683 /* Remove existing extended attribute */
2684 if (!xis
->not_found
)
2685 ret
= ocfs2_xattr_ibody_set(inode
, xi
, xis
, ctxt
);
2686 else if (!xbs
->not_found
)
2687 ret
= ocfs2_xattr_block_set(inode
, xi
, xbs
, ctxt
);
	/* We always try to set the extended attribute into the inode first */
2690 ret
= ocfs2_xattr_ibody_set(inode
, xi
, xis
, ctxt
);
2691 if (!ret
&& !xbs
->not_found
) {
	/*
	 * If that succeeds and the extended attribute exists in an
	 * external block, then we will remove it.
	 */
2699 old_found
= xis
->not_found
;
2700 xis
->not_found
= -ENODATA
;
2701 ret
= ocfs2_calc_xattr_set_need(inode
,
2709 xis
->not_found
= old_found
;
2715 ret
= ocfs2_extend_trans(ctxt
->handle
, credits
+
2716 ctxt
->handle
->h_buffer_credits
);
2721 ret
= ocfs2_xattr_block_set(inode
, xi
, xbs
, ctxt
);
2722 } else if (ret
== -ENOSPC
) {
2723 if (di
->i_xattr_loc
&& !xbs
->xattr_bh
) {
2724 ret
= ocfs2_xattr_block_find(inode
,
2730 old_found
= xis
->not_found
;
2731 xis
->not_found
= -ENODATA
;
2732 ret
= ocfs2_calc_xattr_set_need(inode
,
2740 xis
->not_found
= old_found
;
2746 ret
= ocfs2_extend_trans(ctxt
->handle
, credits
+
2747 ctxt
->handle
->h_buffer_credits
);
	/*
	 * If there is no space in the inode, we will set the extended
	 * attribute into an external block.
	 */
2757 ret
= ocfs2_xattr_block_set(inode
, xi
, xbs
, ctxt
);
2760 if (!xis
->not_found
) {
	/*
	 * If that succeeds and the extended attribute exists in the
	 * inode, we will remove it.
	 */
2767 xbs
->not_found
= -ENODATA
;
2768 ret
= ocfs2_calc_xattr_set_need(inode
,
2781 ret
= ocfs2_extend_trans(ctxt
->handle
, credits
+
2782 ctxt
->handle
->h_buffer_credits
);
2787 ret
= ocfs2_xattr_ibody_set(inode
, xi
,
2794 /* Update inode ctime. */
2795 ret
= ocfs2_journal_access_di(ctxt
->handle
, INODE_CACHE(inode
),
2797 OCFS2_JOURNAL_ACCESS_WRITE
);
2803 inode
->i_ctime
= CURRENT_TIME
;
2804 di
->i_ctime
= cpu_to_le64(inode
->i_ctime
.tv_sec
);
2805 di
->i_ctime_nsec
= cpu_to_le32(inode
->i_ctime
.tv_nsec
);
2806 ocfs2_journal_dirty(ctxt
->handle
, xis
->inode_bh
);
/*
 * This function is only called during inode creation, to initialize the
 * security/ACL xattrs of the new inode.  All transaction credits have
 * been reserved in mknod.
 */
2817 int ocfs2_xattr_set_handle(handle_t
*handle
,
2818 struct inode
*inode
,
2819 struct buffer_head
*di_bh
,
2825 struct ocfs2_alloc_context
*meta_ac
,
2826 struct ocfs2_alloc_context
*data_ac
)
2828 struct ocfs2_dinode
*di
;
2831 struct ocfs2_xattr_info xi
= {
2832 .name_index
= name_index
,
2835 .value_len
= value_len
,
2838 struct ocfs2_xattr_search xis
= {
2839 .not_found
= -ENODATA
,
2842 struct ocfs2_xattr_search xbs
= {
2843 .not_found
= -ENODATA
,
2846 struct ocfs2_xattr_set_ctxt ctxt
= {
2852 if (!ocfs2_supports_xattr(OCFS2_SB(inode
->i_sb
)))
	/*
	 * In an extreme situation, we may need an xattr bucket when the
	 * block size is too small.  We have already reserved the credits
	 * for the bucket in mknod.
	 */
2860 if (inode
->i_sb
->s_blocksize
== OCFS2_MIN_BLOCKSIZE
) {
2861 xbs
.bucket
= ocfs2_xattr_bucket_new(inode
);
2863 mlog_errno(-ENOMEM
);
2868 xis
.inode_bh
= xbs
.inode_bh
= di_bh
;
2869 di
= (struct ocfs2_dinode
*)di_bh
->b_data
;
2871 down_write(&OCFS2_I(inode
)->ip_xattr_sem
);
2873 ret
= ocfs2_xattr_ibody_find(inode
, name_index
, name
, &xis
);
2876 if (xis
.not_found
) {
2877 ret
= ocfs2_xattr_block_find(inode
, name_index
, name
, &xbs
);
2882 ret
= __ocfs2_xattr_set_handle(inode
, di
, &xi
, &xis
, &xbs
, &ctxt
);
2885 up_write(&OCFS2_I(inode
)->ip_xattr_sem
);
2886 brelse(xbs
.xattr_bh
);
2887 ocfs2_xattr_bucket_free(xbs
.bucket
);
/*
 * Set, replace or remove an extended attribute for this inode.
 * value is NULL to remove an existing extended attribute, else either
 * create or replace an extended attribute.
 */
2899 int ocfs2_xattr_set(struct inode
*inode
,
2906 struct buffer_head
*di_bh
= NULL
;
2907 struct ocfs2_dinode
*di
;
2908 int ret
, credits
, ref_meta
= 0, ref_credits
= 0;
2909 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
2910 struct inode
*tl_inode
= osb
->osb_tl_inode
;
2911 struct ocfs2_xattr_set_ctxt ctxt
= { NULL
, NULL
, };
2912 struct ocfs2_refcount_tree
*ref_tree
= NULL
;
2914 struct ocfs2_xattr_info xi
= {
2915 .name_index
= name_index
,
2918 .value_len
= value_len
,
2921 struct ocfs2_xattr_search xis
= {
2922 .not_found
= -ENODATA
,
2925 struct ocfs2_xattr_search xbs
= {
2926 .not_found
= -ENODATA
,
2929 if (!ocfs2_supports_xattr(OCFS2_SB(inode
->i_sb
)))
	/*
	 * Only xbs will be used on indexed trees.  xis doesn't need a
	 * bucket.
	 */
2936 xbs
.bucket
= ocfs2_xattr_bucket_new(inode
);
2938 mlog_errno(-ENOMEM
);
2942 ret
= ocfs2_inode_lock(inode
, &di_bh
, 1);
2945 goto cleanup_nolock
;
2947 xis
.inode_bh
= xbs
.inode_bh
= di_bh
;
2948 di
= (struct ocfs2_dinode
*)di_bh
->b_data
;
2950 down_write(&OCFS2_I(inode
)->ip_xattr_sem
);
	/*
	 * Scan the inode and the external block to find the extended
	 * attribute with the same name and collect search information.
	 */
2955 ret
= ocfs2_xattr_ibody_find(inode
, name_index
, name
, &xis
);
2958 if (xis
.not_found
) {
2959 ret
= ocfs2_xattr_block_find(inode
, name_index
, name
, &xbs
);
2964 if (xis
.not_found
&& xbs
.not_found
) {
2966 if (flags
& XATTR_REPLACE
)
2973 if (flags
& XATTR_CREATE
)
	/* Check whether the value is refcounted and do some preparation. */
2978 if (OCFS2_I(inode
)->ip_dyn_features
& OCFS2_HAS_REFCOUNT_FL
&&
2979 (!xis
.not_found
|| !xbs
.not_found
)) {
2980 ret
= ocfs2_prepare_refcount_xattr(inode
, di
, &xi
,
2981 &xis
, &xbs
, &ref_tree
,
2982 &ref_meta
, &ref_credits
);
2989 mutex_lock(&tl_inode
->i_mutex
);
2991 if (ocfs2_truncate_log_needs_flush(osb
)) {
2992 ret
= __ocfs2_flush_truncate_log(osb
);
2994 mutex_unlock(&tl_inode
->i_mutex
);
2999 mutex_unlock(&tl_inode
->i_mutex
);
3001 ret
= ocfs2_init_xattr_set_ctxt(inode
, di
, &xi
, &xis
,
3002 &xbs
, &ctxt
, ref_meta
, &credits
);
3008 /* we need to update inode's ctime field, so add credit for it. */
3009 credits
+= OCFS2_INODE_UPDATE_CREDITS
;
3010 ctxt
.handle
= ocfs2_start_trans(osb
, credits
+ ref_credits
);
3011 if (IS_ERR(ctxt
.handle
)) {
3012 ret
= PTR_ERR(ctxt
.handle
);
3017 ret
= __ocfs2_xattr_set_handle(inode
, di
, &xi
, &xis
, &xbs
, &ctxt
);
3019 ocfs2_commit_trans(osb
, ctxt
.handle
);
3022 ocfs2_free_alloc_context(ctxt
.data_ac
);
3024 ocfs2_free_alloc_context(ctxt
.meta_ac
);
3025 if (ocfs2_dealloc_has_cluster(&ctxt
.dealloc
))
3026 ocfs2_schedule_truncate_log_flush(osb
, 1);
3027 ocfs2_run_deallocs(osb
, &ctxt
.dealloc
);
3031 ocfs2_unlock_refcount_tree(osb
, ref_tree
, 1);
3032 up_write(&OCFS2_I(inode
)->ip_xattr_sem
);
3033 if (!value
&& !ret
) {
3034 ret
= ocfs2_try_remove_refcount_tree(inode
, di_bh
);
3038 ocfs2_inode_unlock(inode
, 1);
3041 brelse(xbs
.xattr_bh
);
3042 ocfs2_xattr_bucket_free(xbs
.bucket
);
/*
 * Find the xattr extent rec which may contain name_hash.
 * e_cpos will be the first name hash of the xattr rec.
 * el must be the ocfs2_xattr_header.xb_attrs.xb_root.xt_list.
 */
3052 static int ocfs2_xattr_get_rec(struct inode
*inode
,
3057 struct ocfs2_extent_list
*el
)
3060 struct buffer_head
*eb_bh
= NULL
;
3061 struct ocfs2_extent_block
*eb
;
3062 struct ocfs2_extent_rec
*rec
= NULL
;
3065 if (el
->l_tree_depth
) {
3066 ret
= ocfs2_find_leaf(INODE_CACHE(inode
), el
, name_hash
,
3073 eb
= (struct ocfs2_extent_block
*) eb_bh
->b_data
;
3076 if (el
->l_tree_depth
) {
3077 ocfs2_error(inode
->i_sb
,
3078 "Inode %lu has non zero tree depth in "
3079 "xattr tree block %llu\n", inode
->i_ino
,
3080 (unsigned long long)eb_bh
->b_blocknr
);
3086 for (i
= le16_to_cpu(el
->l_next_free_rec
) - 1; i
>= 0; i
--) {
3087 rec
= &el
->l_recs
[i
];
3089 if (le32_to_cpu(rec
->e_cpos
) <= name_hash
) {
3090 e_blkno
= le64_to_cpu(rec
->e_blkno
);
3096 ocfs2_error(inode
->i_sb
, "Inode %lu has bad extent "
3097 "record (%u, %u, 0) in xattr", inode
->i_ino
,
3098 le32_to_cpu(rec
->e_cpos
),
3099 ocfs2_rec_clusters(el
, rec
));
3104 *p_blkno
= le64_to_cpu(rec
->e_blkno
);
3105 *num_clusters
= le16_to_cpu(rec
->e_leaf_clusters
);
3107 *e_cpos
= le32_to_cpu(rec
->e_cpos
);
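/*
 * Illustrative sketch (assumption): the lookup above scans the leaf
 * records from the last one backwards and picks the first record whose
 * e_cpos is <= the name hash.  The hypothetical helper below shows just
 * that scan in isolation.
 */
static int ocfs2_xattr_rec_index_sketch(struct ocfs2_extent_list *el,
					u32 name_hash)
{
	int i;

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		/* The first rec whose start hash is <= name_hash wins. */
		if (le32_to_cpu(el->l_recs[i].e_cpos) <= name_hash)
			return i;
	}

	return -1;	/* no record covers this hash */
}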
typedef int (xattr_bucket_func)(struct inode *inode,
				struct ocfs2_xattr_bucket *bucket,
				void *para);
3117 static int ocfs2_find_xe_in_bucket(struct inode
*inode
,
3118 struct ocfs2_xattr_bucket
*bucket
,
3125 int i
, ret
= 0, cmp
= 1, block_off
, new_offset
;
3126 struct ocfs2_xattr_header
*xh
= bucket_xh(bucket
);
3127 size_t name_len
= strlen(name
);
3128 struct ocfs2_xattr_entry
*xe
= NULL
;
	/*
	 * We don't use binary search in the bucket because there
	 * may be multiple entries with the same name hash.
	 */
3135 for (i
= 0; i
< le16_to_cpu(xh
->xh_count
); i
++) {
3136 xe
= &xh
->xh_entries
[i
];
3138 if (name_hash
> le32_to_cpu(xe
->xe_name_hash
))
3140 else if (name_hash
< le32_to_cpu(xe
->xe_name_hash
))
3143 cmp
= name_index
- ocfs2_xattr_get_type(xe
);
3145 cmp
= name_len
- xe
->xe_name_len
;
3149 ret
= ocfs2_xattr_bucket_get_name_value(inode
->i_sb
,
3160 xe_name
= bucket_block(bucket
, block_off
) + new_offset
;
3161 if (!memcmp(name
, xe_name
, name_len
)) {
/*
 * Find the specified xattr entry in a series of buckets.
 * The series starts from p_blkno and lasts for num_clusters.
 * The ocfs2_xattr_header.xh_num_buckets of the first bucket contains
 * the number of valid buckets.
 *
 * Return the buffer_head this xattr should reside in.  If the xattr's
 * hash falls in the gap between two buckets, return the lower bucket.
 */
3181 static int ocfs2_xattr_bucket_find(struct inode
*inode
,
3188 struct ocfs2_xattr_search
*xs
)
3191 struct ocfs2_xattr_header
*xh
= NULL
;
3192 struct ocfs2_xattr_entry
*xe
= NULL
;
3194 u16 blk_per_bucket
= ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
3195 int low_bucket
= 0, bucket
, high_bucket
;
3196 struct ocfs2_xattr_bucket
*search
;
3198 u64 blkno
, lower_blkno
= 0;
3200 search
= ocfs2_xattr_bucket_new(inode
);
3207 ret
= ocfs2_read_xattr_bucket(search
, p_blkno
);
3213 xh
= bucket_xh(search
);
3214 high_bucket
= le16_to_cpu(xh
->xh_num_buckets
) - 1;
3215 while (low_bucket
<= high_bucket
) {
3216 ocfs2_xattr_bucket_relse(search
);
3218 bucket
= (low_bucket
+ high_bucket
) / 2;
3219 blkno
= p_blkno
+ bucket
* blk_per_bucket
;
3220 ret
= ocfs2_read_xattr_bucket(search
, blkno
);
3226 xh
= bucket_xh(search
);
3227 xe
= &xh
->xh_entries
[0];
3228 if (name_hash
< le32_to_cpu(xe
->xe_name_hash
)) {
3229 high_bucket
= bucket
- 1;
3234 * Check whether the hash of the last entry in our
3235 * bucket is larger than the search one. for an empty
3236 * bucket, the last one is also the first one.
3239 xe
= &xh
->xh_entries
[le16_to_cpu(xh
->xh_count
) - 1];
3241 last_hash
= le32_to_cpu(xe
->xe_name_hash
);
3243 /* record lower_blkno which may be the insert place. */
3244 lower_blkno
= blkno
;
3246 if (name_hash
> le32_to_cpu(xe
->xe_name_hash
)) {
3247 low_bucket
= bucket
+ 1;
3251 /* the searched xattr should reside in this bucket if exists. */
3252 ret
= ocfs2_find_xe_in_bucket(inode
, search
,
3253 name_index
, name
, name_hash
,
3263 * Record the bucket we have found.
3264 * When the xattr's hash value is in the gap of 2 buckets, we will
3265 * always set it to the previous bucket.
3268 lower_blkno
= p_blkno
;
3270 /* This should be in cache - we just read it during the search */
3271 ret
= ocfs2_read_xattr_bucket(xs
->bucket
, lower_blkno
);
3277 xs
->header
= bucket_xh(xs
->bucket
);
3278 xs
->base
= bucket_block(xs
->bucket
, 0);
3279 xs
->end
= xs
->base
+ inode
->i_sb
->s_blocksize
;
3282 xs
->here
= &xs
->header
->xh_entries
[index
];
3283 mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name
,
3284 (unsigned long long)bucket_blkno(xs
->bucket
), index
);
3289 ocfs2_xattr_bucket_free(search
);
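/*
 * Illustrative sketch (assumption): the bucket search above is a binary
 * search keyed by each bucket's first xe_name_hash, falling back to the
 * lower bucket when the hash lands in a gap.  The hypothetical helper
 * below shows the same idea over a plain array of first-entry hashes.
 */
static int ocfs2_xattr_pick_bucket_sketch(const u32 *first_hash,
					  int nr_buckets, u32 name_hash)
{
	int low = 0, high = nr_buckets - 1, mid, lower = 0;

	while (low <= high) {
		mid = (low + high) / 2;
		if (name_hash < first_hash[mid]) {
			high = mid - 1;
		} else {
			lower = mid;	/* candidate bucket */
			low = mid + 1;
		}
	}

	return lower;	/* the bucket this hash should reside in */
}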
3293 static int ocfs2_xattr_index_block_find(struct inode
*inode
,
3294 struct buffer_head
*root_bh
,
3297 struct ocfs2_xattr_search
*xs
)
3300 struct ocfs2_xattr_block
*xb
=
3301 (struct ocfs2_xattr_block
*)root_bh
->b_data
;
3302 struct ocfs2_xattr_tree_root
*xb_root
= &xb
->xb_attrs
.xb_root
;
3303 struct ocfs2_extent_list
*el
= &xb_root
->xt_list
;
3305 u32 first_hash
, num_clusters
= 0;
3306 u32 name_hash
= ocfs2_xattr_name_hash(inode
, name
, strlen(name
));
3308 if (le16_to_cpu(el
->l_next_free_rec
) == 0)
3311 mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n",
3312 name
, name_hash
, name_index
);
3314 ret
= ocfs2_xattr_get_rec(inode
, name_hash
, &p_blkno
, &first_hash
,
3321 BUG_ON(p_blkno
== 0 || num_clusters
== 0 || first_hash
> name_hash
);
3323 mlog(0, "find xattr extent rec %u clusters from %llu, the first hash "
3324 "in the rec is %u\n", num_clusters
, (unsigned long long)p_blkno
,
3327 ret
= ocfs2_xattr_bucket_find(inode
, name_index
, name
, name_hash
,
3328 p_blkno
, first_hash
, num_clusters
, xs
);
3334 static int ocfs2_iterate_xattr_buckets(struct inode
*inode
,
3337 xattr_bucket_func
*func
,
3341 u32 bpc
= ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode
->i_sb
));
3342 u32 num_buckets
= clusters
* bpc
;
3343 struct ocfs2_xattr_bucket
*bucket
;
3345 bucket
= ocfs2_xattr_bucket_new(inode
);
3347 mlog_errno(-ENOMEM
);
3351 mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n",
3352 clusters
, (unsigned long long)blkno
);
3354 for (i
= 0; i
< num_buckets
; i
++, blkno
+= bucket
->bu_blocks
) {
3355 ret
= ocfs2_read_xattr_bucket(bucket
, blkno
);
3362 * The real bucket num in this series of blocks is stored
3363 * in the 1st bucket.
3366 num_buckets
= le16_to_cpu(bucket_xh(bucket
)->xh_num_buckets
);
3368 mlog(0, "iterating xattr bucket %llu, first hash %u\n",
3369 (unsigned long long)blkno
,
3370 le32_to_cpu(bucket_xh(bucket
)->xh_entries
[0].xe_name_hash
));
3372 ret
= func(inode
, bucket
, para
);
3373 if (ret
&& ret
!= -ERANGE
)
3375 /* Fall through to bucket_relse() */
3378 ocfs2_xattr_bucket_relse(bucket
);
3383 ocfs2_xattr_bucket_free(bucket
);
struct ocfs2_xattr_tree_list {
	char *buffer;
	size_t buffer_size;
	size_t result;
};

static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
					     struct ocfs2_xattr_header *xh,
					     int index,
					     int *block_off,
					     int *new_offset)
{
	u16 name_offset;

	if (index < 0 || index >= le16_to_cpu(xh->xh_count))
		return -EINVAL;

	name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);

	*block_off = name_offset >> sb->s_blocksize_bits;
	*new_offset = name_offset % sb->s_blocksize;

	return 0;
}
3412 static int ocfs2_list_xattr_bucket(struct inode
*inode
,
3413 struct ocfs2_xattr_bucket
*bucket
,
3417 struct ocfs2_xattr_tree_list
*xl
= (struct ocfs2_xattr_tree_list
*)para
;
3418 int i
, block_off
, new_offset
;
3419 const char *prefix
, *name
;
3421 for (i
= 0 ; i
< le16_to_cpu(bucket_xh(bucket
)->xh_count
); i
++) {
3422 struct ocfs2_xattr_entry
*entry
= &bucket_xh(bucket
)->xh_entries
[i
];
3423 type
= ocfs2_xattr_get_type(entry
);
3424 prefix
= ocfs2_xattr_prefix(type
);
3427 ret
= ocfs2_xattr_bucket_get_name_value(inode
->i_sb
,
3435 name
= (const char *)bucket_block(bucket
, block_off
) +
3437 ret
= ocfs2_xattr_list_entry(xl
->buffer
,
3441 entry
->xe_name_len
);
3450 static int ocfs2_iterate_xattr_index_block(struct inode
*inode
,
3451 struct buffer_head
*blk_bh
,
3452 xattr_tree_rec_func
*rec_func
,
3455 struct ocfs2_xattr_block
*xb
=
3456 (struct ocfs2_xattr_block
*)blk_bh
->b_data
;
3457 struct ocfs2_extent_list
*el
= &xb
->xb_attrs
.xb_root
.xt_list
;
3459 u32 name_hash
= UINT_MAX
, e_cpos
= 0, num_clusters
= 0;
3462 if (!el
->l_next_free_rec
|| !rec_func
)
3465 while (name_hash
> 0) {
3466 ret
= ocfs2_xattr_get_rec(inode
, name_hash
, &p_blkno
,
3467 &e_cpos
, &num_clusters
, el
);
3473 ret
= rec_func(inode
, blk_bh
, p_blkno
, e_cpos
,
3474 num_clusters
, para
);
3484 name_hash
= e_cpos
- 1;
static int ocfs2_list_xattr_tree_rec(struct inode *inode,
				     struct buffer_head *root_bh,
				     u64 blkno, u32 cpos, u32 len, void *para)
{
	return ocfs2_iterate_xattr_buckets(inode, blkno, len,
					   ocfs2_list_xattr_bucket, para);
}
3499 static int ocfs2_xattr_tree_list_index_block(struct inode
*inode
,
3500 struct buffer_head
*blk_bh
,
3505 struct ocfs2_xattr_tree_list xl
= {
3507 .buffer_size
= buffer_size
,
3511 ret
= ocfs2_iterate_xattr_index_block(inode
, blk_bh
,
3512 ocfs2_list_xattr_tree_rec
, &xl
);
static int cmp_xe(const void *a, const void *b)
{
	const struct ocfs2_xattr_entry *l = a, *r = b;
	u32 l_hash = le32_to_cpu(l->xe_name_hash);
	u32 r_hash = le32_to_cpu(r->xe_name_hash);

	if (l_hash > r_hash)
		return 1;
	if (l_hash < r_hash)
		return -1;
	return 0;
}
static void swap_xe(void *a, void *b, int size)
{
	struct ocfs2_xattr_entry *l = a, *r = b, tmp;

	tmp = *l;
	memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
	memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
}
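/*
 * Illustrative sketch (assumption): this is how the two callbacks above
 * are meant to be handed to the kernel's sort() helper to order a
 * header's entries by name hash, mirroring the sort done in
 * ocfs2_cp_xattr_block_to_bucket() below.  The wrapper name is
 * hypothetical.
 */
static void ocfs2_sort_xh_by_hash_sketch(struct ocfs2_xattr_header *xh)
{
	sort(xh->xh_entries, le16_to_cpu(xh->xh_count),
	     sizeof(struct ocfs2_xattr_entry), cmp_xe, swap_xe);
}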
/*
 * When the ocfs2_xattr_block is filled up, a new bucket will be created
 * and all the xattr entries will be moved to the new bucket.
 * The header goes at the start of the bucket, and the names+values are
 * filled from the end.  This is why *target starts as the last buffer.
 * Note: we need to sort the entries since they are not saved in order
 * in the ocfs2_xattr_block.
 */
3553 static void ocfs2_cp_xattr_block_to_bucket(struct inode
*inode
,
3554 struct buffer_head
*xb_bh
,
3555 struct ocfs2_xattr_bucket
*bucket
)
3557 int i
, blocksize
= inode
->i_sb
->s_blocksize
;
3558 int blks
= ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
3559 u16 offset
, size
, off_change
;
3560 struct ocfs2_xattr_entry
*xe
;
3561 struct ocfs2_xattr_block
*xb
=
3562 (struct ocfs2_xattr_block
*)xb_bh
->b_data
;
3563 struct ocfs2_xattr_header
*xb_xh
= &xb
->xb_attrs
.xb_header
;
3564 struct ocfs2_xattr_header
*xh
= bucket_xh(bucket
);
3565 u16 count
= le16_to_cpu(xb_xh
->xh_count
);
3566 char *src
= xb_bh
->b_data
;
3567 char *target
= bucket_block(bucket
, blks
- 1);
3569 mlog(0, "cp xattr from block %llu to bucket %llu\n",
3570 (unsigned long long)xb_bh
->b_blocknr
,
3571 (unsigned long long)bucket_blkno(bucket
));
3573 for (i
= 0; i
< blks
; i
++)
3574 memset(bucket_block(bucket
, i
), 0, blocksize
);
	/*
	 * Since the xe_name_offset is based on ocfs2_xattr_header,
	 * there is an offset change corresponding to the change of
	 * ocfs2_xattr_header's position.
	 */
3581 off_change
= offsetof(struct ocfs2_xattr_block
, xb_attrs
.xb_header
);
3582 xe
= &xb_xh
->xh_entries
[count
- 1];
3583 offset
= le16_to_cpu(xe
->xe_name_offset
) + off_change
;
3584 size
= blocksize
- offset
;
3586 /* copy all the names and values. */
3587 memcpy(target
+ offset
, src
+ offset
, size
);
3589 /* Init new header now. */
3590 xh
->xh_count
= xb_xh
->xh_count
;
3591 xh
->xh_num_buckets
= cpu_to_le16(1);
3592 xh
->xh_name_value_len
= cpu_to_le16(size
);
3593 xh
->xh_free_start
= cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE
- size
);
3595 /* copy all the entries. */
3596 target
= bucket_block(bucket
, 0);
3597 offset
= offsetof(struct ocfs2_xattr_header
, xh_entries
);
3598 size
= count
* sizeof(struct ocfs2_xattr_entry
);
3599 memcpy(target
+ offset
, (char *)xb_xh
+ offset
, size
);
3601 /* Change the xe offset for all the xe because of the move. */
3602 off_change
= OCFS2_XATTR_BUCKET_SIZE
- blocksize
+
3603 offsetof(struct ocfs2_xattr_block
, xb_attrs
.xb_header
);
3604 for (i
= 0; i
< count
; i
++)
3605 le16_add_cpu(&xh
->xh_entries
[i
].xe_name_offset
, off_change
);
3607 mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n",
3608 offset
, size
, off_change
);
	sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
	     cmp_xe, swap_xe);
}
/*
 * After we move xattrs from the block to the index b-tree, we have to
 * update ocfs2_xattr_search to the new xe and base.
 *
 * When the entry is in the xattr block, xattr_bh indicates the storage
 * place.  While if the entry is in the index b-tree, "bucket" indicates
 * the real place of the xattr.
 */
static void ocfs2_xattr_update_xattr_search(struct inode *inode,
					    struct ocfs2_xattr_search *xs,
					    struct buffer_head *old_bh)
{
	char *buf = old_bh->b_data;
	struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf;
	struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header;
	int i;

	xs->header = bucket_xh(xs->bucket);
	xs->base = bucket_block(xs->bucket, 0);
	xs->end = xs->base + inode->i_sb->s_blocksize;

	if (xs->not_found)
		return;

	i = xs->here - old_xh->xh_entries;
	xs->here = &xs->header->xh_entries[i];
}
3642 static int ocfs2_xattr_create_index_block(struct inode
*inode
,
3643 struct ocfs2_xattr_search
*xs
,
3644 struct ocfs2_xattr_set_ctxt
*ctxt
)
3649 handle_t
*handle
= ctxt
->handle
;
3650 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
3651 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
3652 struct buffer_head
*xb_bh
= xs
->xattr_bh
;
3653 struct ocfs2_xattr_block
*xb
=
3654 (struct ocfs2_xattr_block
*)xb_bh
->b_data
;
3655 struct ocfs2_xattr_tree_root
*xr
;
3656 u16 xb_flags
= le16_to_cpu(xb
->xb_flags
);
3658 mlog(0, "create xattr index block for %llu\n",
3659 (unsigned long long)xb_bh
->b_blocknr
);
3661 BUG_ON(xb_flags
& OCFS2_XATTR_INDEXED
);
3662 BUG_ON(!xs
->bucket
);
3666 * We can use this lock for now, and maybe move to a dedicated mutex
3667 * if performance becomes a problem later.
3669 down_write(&oi
->ip_alloc_sem
);
3671 ret
= ocfs2_journal_access_xb(handle
, INODE_CACHE(inode
), xb_bh
,
3672 OCFS2_JOURNAL_ACCESS_WRITE
);
3678 ret
= __ocfs2_claim_clusters(osb
, handle
, ctxt
->data_ac
,
3679 1, 1, &bit_off
, &len
);
3686 * The bucket may spread in many blocks, and
3687 * we will only touch the 1st block and the last block
3688 * in the whole bucket(one for entry and one for data).
3690 blkno
= ocfs2_clusters_to_blocks(inode
->i_sb
, bit_off
);
3692 mlog(0, "allocate 1 cluster from %llu to xattr block\n",
3693 (unsigned long long)blkno
);
3695 ret
= ocfs2_init_xattr_bucket(xs
->bucket
, blkno
);
3701 ret
= ocfs2_xattr_bucket_journal_access(handle
, xs
->bucket
,
3702 OCFS2_JOURNAL_ACCESS_CREATE
);
3708 ocfs2_cp_xattr_block_to_bucket(inode
, xb_bh
, xs
->bucket
);
3709 ocfs2_xattr_bucket_journal_dirty(handle
, xs
->bucket
);
3711 ocfs2_xattr_update_xattr_search(inode
, xs
, xb_bh
);
3713 /* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */
3714 memset(&xb
->xb_attrs
, 0, inode
->i_sb
->s_blocksize
-
3715 offsetof(struct ocfs2_xattr_block
, xb_attrs
));
3717 xr
= &xb
->xb_attrs
.xb_root
;
3718 xr
->xt_clusters
= cpu_to_le32(1);
3719 xr
->xt_last_eb_blk
= 0;
3720 xr
->xt_list
.l_tree_depth
= 0;
3721 xr
->xt_list
.l_count
= cpu_to_le16(ocfs2_xattr_recs_per_xb(inode
->i_sb
));
3722 xr
->xt_list
.l_next_free_rec
= cpu_to_le16(1);
3724 xr
->xt_list
.l_recs
[0].e_cpos
= 0;
3725 xr
->xt_list
.l_recs
[0].e_blkno
= cpu_to_le64(blkno
);
3726 xr
->xt_list
.l_recs
[0].e_leaf_clusters
= cpu_to_le16(1);
3728 xb
->xb_flags
= cpu_to_le16(xb_flags
| OCFS2_XATTR_INDEXED
);
3730 ocfs2_journal_dirty(handle
, xb_bh
);
3733 up_write(&oi
->ip_alloc_sem
);
static int cmp_xe_offset(const void *a, const void *b)
{
	const struct ocfs2_xattr_entry *l = a, *r = b;
	u32 l_name_offset = le16_to_cpu(l->xe_name_offset);
	u32 r_name_offset = le16_to_cpu(r->xe_name_offset);

	if (l_name_offset < r_name_offset)
		return 1;
	if (l_name_offset > r_name_offset)
		return -1;
	return 0;
}
/*
 * Defragment an xattr bucket if we find that the bucket has some
 * holes between name/value pairs.
 * We will move all the name/value pairs to the end of the bucket
 * so that we can spare some space for insertion.
 */
3757 static int ocfs2_defrag_xattr_bucket(struct inode
*inode
,
3759 struct ocfs2_xattr_bucket
*bucket
)
3762 size_t end
, offset
, len
, value_len
;
3763 struct ocfs2_xattr_header
*xh
;
3764 char *entries
, *buf
, *bucket_buf
= NULL
;
3765 u64 blkno
= bucket_blkno(bucket
);
3767 size_t blocksize
= inode
->i_sb
->s_blocksize
;
3768 struct ocfs2_xattr_entry
*xe
;
	/*
	 * In order to make the operation more efficient and generic,
	 * we copy all the blocks into contiguous memory and do the
	 * defragment there, so if anything goes wrong, we will not touch
	 * the real data.
	 */
3776 bucket_buf
= kmalloc(OCFS2_XATTR_BUCKET_SIZE
, GFP_NOFS
);
3783 for (i
= 0; i
< bucket
->bu_blocks
; i
++, buf
+= blocksize
)
3784 memcpy(buf
, bucket_block(bucket
, i
), blocksize
);
3786 ret
= ocfs2_xattr_bucket_journal_access(handle
, bucket
,
3787 OCFS2_JOURNAL_ACCESS_WRITE
);
3793 xh
= (struct ocfs2_xattr_header
*)bucket_buf
;
3794 entries
= (char *)xh
->xh_entries
;
3795 xh_free_start
= le16_to_cpu(xh
->xh_free_start
);
3797 mlog(0, "adjust xattr bucket in %llu, count = %u, "
3798 "xh_free_start = %u, xh_name_value_len = %u.\n",
3799 (unsigned long long)blkno
, le16_to_cpu(xh
->xh_count
),
3800 xh_free_start
, le16_to_cpu(xh
->xh_name_value_len
));
3803 * sort all the entries by their offset.
3804 * the largest will be the first, so that we can
3805 * move them to the end one by one.
3807 sort(entries
, le16_to_cpu(xh
->xh_count
),
3808 sizeof(struct ocfs2_xattr_entry
),
3809 cmp_xe_offset
, swap_xe
);
3811 /* Move all name/values to the end of the bucket. */
3812 xe
= xh
->xh_entries
;
3813 end
= OCFS2_XATTR_BUCKET_SIZE
;
3814 for (i
= 0; i
< le16_to_cpu(xh
->xh_count
); i
++, xe
++) {
3815 offset
= le16_to_cpu(xe
->xe_name_offset
);
3816 if (ocfs2_xattr_is_local(xe
))
3817 value_len
= OCFS2_XATTR_SIZE(
3818 le64_to_cpu(xe
->xe_value_size
));
3820 value_len
= OCFS2_XATTR_ROOT_SIZE
;
3821 len
= OCFS2_XATTR_SIZE(xe
->xe_name_len
) + value_len
;
3824 * We must make sure that the name/value pair
3825 * exist in the same block. So adjust end to
3826 * the previous block end if needed.
3828 if (((end
- len
) / blocksize
!=
3829 (end
- 1) / blocksize
))
3830 end
= end
- end
% blocksize
;
3832 if (end
> offset
+ len
) {
3833 memmove(bucket_buf
+ end
- len
,
3834 bucket_buf
+ offset
, len
);
3835 xe
->xe_name_offset
= cpu_to_le16(end
- len
);
3838 mlog_bug_on_msg(end
< offset
+ len
, "Defrag check failed for "
3839 "bucket %llu\n", (unsigned long long)blkno
);
3844 mlog_bug_on_msg(xh_free_start
> end
, "Defrag check failed for "
3845 "bucket %llu\n", (unsigned long long)blkno
);
3847 if (xh_free_start
== end
)
3850 memset(bucket_buf
+ xh_free_start
, 0, end
- xh_free_start
);
3851 xh
->xh_free_start
= cpu_to_le16(end
);
3853 /* sort the entries by their name_hash. */
3854 sort(entries
, le16_to_cpu(xh
->xh_count
),
3855 sizeof(struct ocfs2_xattr_entry
),
3859 for (i
= 0; i
< bucket
->bu_blocks
; i
++, buf
+= blocksize
)
3860 memcpy(bucket_block(bucket
, i
), buf
, blocksize
);
3861 ocfs2_xattr_bucket_journal_dirty(handle
, bucket
);
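/*
 * Illustrative sketch (assumption): the defragmenter above never lets a
 * name/value pair straddle a block boundary inside the bucket.  Given
 * the current end of the packed area and the pair length, this
 * hypothetical helper shows the adjustment it applies before placing
 * the pair.
 */
static inline size_t ocfs2_defrag_pack_end_sketch(size_t end, size_t len,
						  size_t blocksize)
{
	/* Would [end - len, end) cross a block boundary? */
	if ((end - len) / blocksize != (end - 1) / blocksize)
		end -= end % blocksize;	/* snap back to the block start */

	return end;
}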
/*
 * prev_blkno points to the start of an existing extent.  new_blkno
 * points to a newly allocated extent.  Because we know each of our
 * clusters contains more than one bucket, we can easily split one
 * cluster at a bucket boundary.  So we take the last cluster of the
 * existing extent and split it down the middle.  We move the last half
 * of the buckets in the last cluster of the existing extent over to the
 * new extent.
 *
 * first_bh is the buffer at prev_blkno so we can update the existing
 * extent's bucket count.  header_bh is the bucket where we were hoping
 * to insert our xattr.  If the bucket move places the target in the new
 * extent, we'll update first_bh and header_bh after modifying the old
 * extent.
 *
 * first_hash will be set as the 1st xe's name_hash in the new extent.
 */
3885 static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode
*inode
,
3887 struct ocfs2_xattr_bucket
*first
,
3888 struct ocfs2_xattr_bucket
*target
,
3894 struct super_block
*sb
= inode
->i_sb
;
3895 int blks_per_bucket
= ocfs2_blocks_per_xattr_bucket(sb
);
3896 int num_buckets
= ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb
));
3897 int to_move
= num_buckets
/ 2;
3899 u64 last_cluster_blkno
= bucket_blkno(first
) +
3900 ((num_clusters
- 1) * ocfs2_clusters_to_blocks(sb
, 1));
3902 BUG_ON(le16_to_cpu(bucket_xh(first
)->xh_num_buckets
) < num_buckets
);
3903 BUG_ON(OCFS2_XATTR_BUCKET_SIZE
== OCFS2_SB(sb
)->s_clustersize
);
3905 mlog(0, "move half of xattrs in cluster %llu to %llu\n",
3906 (unsigned long long)last_cluster_blkno
, (unsigned long long)new_blkno
);
3908 ret
= ocfs2_mv_xattr_buckets(inode
, handle
, bucket_blkno(first
),
3909 last_cluster_blkno
, new_blkno
,
3910 to_move
, first_hash
);
3916 /* This is the first bucket that got moved */
3917 src_blkno
= last_cluster_blkno
+ (to_move
* blks_per_bucket
);
3920 * If the target bucket was part of the moved buckets, we need to
3921 * update first and target.
3923 if (bucket_blkno(target
) >= src_blkno
) {
3924 /* Find the block for the new target bucket */
3925 src_blkno
= new_blkno
+
3926 (bucket_blkno(target
) - src_blkno
);
3928 ocfs2_xattr_bucket_relse(first
);
3929 ocfs2_xattr_bucket_relse(target
);
3932 * These shouldn't fail - the buffers are in the
3933 * journal from ocfs2_cp_xattr_bucket().
3935 ret
= ocfs2_read_xattr_bucket(first
, new_blkno
);
3940 ret
= ocfs2_read_xattr_bucket(target
, src_blkno
);
/*
 * Find the suitable pos when we divide a bucket into 2.
 * We have to make sure the xattrs with the same hash value exist
 * in the same bucket.
 *
 * If this ocfs2_xattr_header covers more than one hash value, find a
 * place where the hash value changes.  Try to find the most even split.
 * The most common case is that all entries have different hash values,
 * and the first check we make will find a place to split.
 */
static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
{
	struct ocfs2_xattr_entry *entries = xh->xh_entries;
	int count = le16_to_cpu(xh->xh_count);
	int delta, middle = count / 2;

	/*
	 * We start at the middle.  Each step gets farther away in both
	 * directions.  We therefore hit the change in hash value
	 * nearest to the middle.  Note that this loop does not execute
	 * for count < 2.
	 */
	for (delta = 0; delta < middle; delta++) {
		/* Let's check delta earlier than middle */
		if (cmp_xe(&entries[middle - delta - 1],
			   &entries[middle - delta]))
			return middle - delta;

		/* For even counts, don't walk off the end */
		if ((middle + delta + 1) == count)
			continue;

		/* Now try delta past middle */
		if (cmp_xe(&entries[middle + delta],
			   &entries[middle + delta + 1]))
			return middle + delta + 1;
	}

	/* Every entry had the same hash */
	return count;
}
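/*
 * Illustrative sketch (assumption): the same "walk outward from the
 * middle" search as above, but over a plain array of hashes, to make
 * the split rule easier to see in isolation.  The helper name is
 * hypothetical.
 */
static int ocfs2_find_divide_pos_sketch(const u32 *hash, int count)
{
	int delta, middle = count / 2;

	for (delta = 0; delta < middle; delta++) {
		if (hash[middle - delta - 1] != hash[middle - delta])
			return middle - delta;
		if ((middle + delta + 1) == count)
			continue;
		if (hash[middle + delta] != hash[middle + delta + 1])
			return middle + delta + 1;
	}

	return count;	/* every entry had the same hash */
}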
/*
 * Move some xattrs from the old bucket (blk) to the new bucket (new_blk).
 * first_hash will record the 1st hash of the new bucket.
 *
 * Normally half of the xattrs will be moved.  But we have to make
 * sure that the xattrs with the same hash value are stored in the
 * same bucket.  If all the xattrs in this bucket have the same hash
 * value, the new bucket will be initialized as an empty one and
 * first_hash will be initialized as (hash_value+1).
 */
4002 static int ocfs2_divide_xattr_bucket(struct inode
*inode
,
4007 int new_bucket_head
)
4010 int count
, start
, len
, name_value_len
= 0, xe_len
, name_offset
= 0;
4011 struct ocfs2_xattr_bucket
*s_bucket
= NULL
, *t_bucket
= NULL
;
4012 struct ocfs2_xattr_header
*xh
;
4013 struct ocfs2_xattr_entry
*xe
;
4014 int blocksize
= inode
->i_sb
->s_blocksize
;
4016 mlog(0, "move some of xattrs from bucket %llu to %llu\n",
4017 (unsigned long long)blk
, (unsigned long long)new_blk
);
4019 s_bucket
= ocfs2_xattr_bucket_new(inode
);
4020 t_bucket
= ocfs2_xattr_bucket_new(inode
);
4021 if (!s_bucket
|| !t_bucket
) {
4027 ret
= ocfs2_read_xattr_bucket(s_bucket
, blk
);
4033 ret
= ocfs2_xattr_bucket_journal_access(handle
, s_bucket
,
4034 OCFS2_JOURNAL_ACCESS_WRITE
);
4041 * Even if !new_bucket_head, we're overwriting t_bucket. Thus,
4042 * there's no need to read it.
4044 ret
= ocfs2_init_xattr_bucket(t_bucket
, new_blk
);
4051 * Hey, if we're overwriting t_bucket, what difference does
4052 * ACCESS_CREATE vs ACCESS_WRITE make? See the comment in the
4053 * same part of ocfs2_cp_xattr_bucket().
4055 ret
= ocfs2_xattr_bucket_journal_access(handle
, t_bucket
,
4057 OCFS2_JOURNAL_ACCESS_CREATE
:
4058 OCFS2_JOURNAL_ACCESS_WRITE
);
4064 xh
= bucket_xh(s_bucket
);
4065 count
= le16_to_cpu(xh
->xh_count
);
4066 start
= ocfs2_xattr_find_divide_pos(xh
);
4068 if (start
== count
) {
4069 xe
= &xh
->xh_entries
[start
-1];
4072 * initialized a new empty bucket here.
4073 * The hash value is set as one larger than
4074 * that of the last entry in the previous bucket.
4076 for (i
= 0; i
< t_bucket
->bu_blocks
; i
++)
4077 memset(bucket_block(t_bucket
, i
), 0, blocksize
);
4079 xh
= bucket_xh(t_bucket
);
4080 xh
->xh_free_start
= cpu_to_le16(blocksize
);
4081 xh
->xh_entries
[0].xe_name_hash
= xe
->xe_name_hash
;
4082 le32_add_cpu(&xh
->xh_entries
[0].xe_name_hash
, 1);
4084 goto set_num_buckets
;
4087 /* copy the whole bucket to the new first. */
4088 ocfs2_xattr_bucket_copy_data(t_bucket
, s_bucket
);
4090 /* update the new bucket. */
4091 xh
= bucket_xh(t_bucket
);
4094 * Calculate the total name/value len and xh_free_start for
4095 * the old bucket first.
4097 name_offset
= OCFS2_XATTR_BUCKET_SIZE
;
4099 for (i
= 0; i
< start
; i
++) {
4100 xe
= &xh
->xh_entries
[i
];
4101 xe_len
= OCFS2_XATTR_SIZE(xe
->xe_name_len
);
4102 if (ocfs2_xattr_is_local(xe
))
4104 OCFS2_XATTR_SIZE(le64_to_cpu(xe
->xe_value_size
));
4106 xe_len
+= OCFS2_XATTR_ROOT_SIZE
;
4107 name_value_len
+= xe_len
;
4108 if (le16_to_cpu(xe
->xe_name_offset
) < name_offset
)
4109 name_offset
= le16_to_cpu(xe
->xe_name_offset
);
	/*
	 * Now begin the modification to the new bucket.
	 *
	 * In the new bucket, we just move the xattr entry to the
	 * beginning and don't touch the name/value.  So there will be
	 * some holes in the bucket, and they will be removed when
	 * ocfs2_defrag_xattr_bucket is called.
	 */
4120 xe
= &xh
->xh_entries
[start
];
4121 len
= sizeof(struct ocfs2_xattr_entry
) * (count
- start
);
4122 mlog(0, "mv xattr entry len %d from %d to %d\n", len
,
4123 (int)((char *)xe
- (char *)xh
),
4124 (int)((char *)xh
->xh_entries
- (char *)xh
));
4125 memmove((char *)xh
->xh_entries
, (char *)xe
, len
);
4126 xe
= &xh
->xh_entries
[count
- start
];
4127 len
= sizeof(struct ocfs2_xattr_entry
) * start
;
4128 memset((char *)xe
, 0, len
);
4130 le16_add_cpu(&xh
->xh_count
, -start
);
4131 le16_add_cpu(&xh
->xh_name_value_len
, -name_value_len
);
4133 /* Calculate xh_free_start for the new bucket. */
4134 xh
->xh_free_start
= cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE
);
4135 for (i
= 0; i
< le16_to_cpu(xh
->xh_count
); i
++) {
4136 xe
= &xh
->xh_entries
[i
];
4137 xe_len
= OCFS2_XATTR_SIZE(xe
->xe_name_len
);
4138 if (ocfs2_xattr_is_local(xe
))
4140 OCFS2_XATTR_SIZE(le64_to_cpu(xe
->xe_value_size
));
4142 xe_len
+= OCFS2_XATTR_ROOT_SIZE
;
4143 if (le16_to_cpu(xe
->xe_name_offset
) <
4144 le16_to_cpu(xh
->xh_free_start
))
4145 xh
->xh_free_start
= xe
->xe_name_offset
;
4149 /* set xh->xh_num_buckets for the new xh. */
4150 if (new_bucket_head
)
4151 xh
->xh_num_buckets
= cpu_to_le16(1);
4153 xh
->xh_num_buckets
= 0;
4155 ocfs2_xattr_bucket_journal_dirty(handle
, t_bucket
);
4157 /* store the first_hash of the new bucket. */
4159 *first_hash
= le32_to_cpu(xh
->xh_entries
[0].xe_name_hash
);
4162 * Now only update the 1st block of the old bucket. If we
4163 * just added a new empty bucket, there is no need to modify
4169 xh
= bucket_xh(s_bucket
);
4170 memset(&xh
->xh_entries
[start
], 0,
4171 sizeof(struct ocfs2_xattr_entry
) * (count
- start
));
4172 xh
->xh_count
= cpu_to_le16(start
);
4173 xh
->xh_free_start
= cpu_to_le16(name_offset
);
4174 xh
->xh_name_value_len
= cpu_to_le16(name_value_len
);
4176 ocfs2_xattr_bucket_journal_dirty(handle
, s_bucket
);
4179 ocfs2_xattr_bucket_free(s_bucket
);
4180 ocfs2_xattr_bucket_free(t_bucket
);
/*
 * Copy xattrs from one bucket to another bucket.
 *
 * The caller must make sure that the journal transaction
 * has enough space for journaling.
 */
4191 static int ocfs2_cp_xattr_bucket(struct inode
*inode
,
4198 struct ocfs2_xattr_bucket
*s_bucket
= NULL
, *t_bucket
= NULL
;
4200 BUG_ON(s_blkno
== t_blkno
);
4202 mlog(0, "cp bucket %llu to %llu, target is %d\n",
4203 (unsigned long long)s_blkno
, (unsigned long long)t_blkno
,
4206 s_bucket
= ocfs2_xattr_bucket_new(inode
);
4207 t_bucket
= ocfs2_xattr_bucket_new(inode
);
4208 if (!s_bucket
|| !t_bucket
) {
4214 ret
= ocfs2_read_xattr_bucket(s_bucket
, s_blkno
);
4219 * Even if !t_is_new, we're overwriting t_bucket. Thus,
4220 * there's no need to read it.
4222 ret
= ocfs2_init_xattr_bucket(t_bucket
, t_blkno
);
4227 * Hey, if we're overwriting t_bucket, what difference does
4228 * ACCESS_CREATE vs ACCESS_WRITE make? Well, if we allocated a new
4229 * cluster to fill, we came here from
4230 * ocfs2_mv_xattr_buckets(), and it is really new -
4231 * ACCESS_CREATE is required. But we also might have moved data
4232 * out of t_bucket before extending back into it.
4233 * ocfs2_add_new_xattr_bucket() can do this - its call to
4234 * ocfs2_add_new_xattr_cluster() may have created a new extent
4235 * and copied out the end of the old extent. Then it re-extends
4236 * the old extent back to create space for new xattrs. That's
4237 * how we get here, and the bucket isn't really new.
4239 ret
= ocfs2_xattr_bucket_journal_access(handle
, t_bucket
,
4241 OCFS2_JOURNAL_ACCESS_CREATE
:
4242 OCFS2_JOURNAL_ACCESS_WRITE
);
4246 ocfs2_xattr_bucket_copy_data(t_bucket
, s_bucket
);
4247 ocfs2_xattr_bucket_journal_dirty(handle
, t_bucket
);
4250 ocfs2_xattr_bucket_free(t_bucket
);
4251 ocfs2_xattr_bucket_free(s_bucket
);
/*
 * src_blk points to the start of an existing extent.  last_blk points to
 * the last cluster in that extent.  to_blk points to a newly allocated
 * extent.  We copy the buckets from the cluster at last_blk to the new
 * extent.  If start_bucket is non-zero, we skip that many buckets before
 * we start copying.  The new extent's xh_num_buckets gets set to the
 * number of buckets we copied.  The old extent's xh_num_buckets shrinks
 * by the same amount.
 */
4265 static int ocfs2_mv_xattr_buckets(struct inode
*inode
, handle_t
*handle
,
4266 u64 src_blk
, u64 last_blk
, u64 to_blk
,
4267 unsigned int start_bucket
,
4270 int i
, ret
, credits
;
4271 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
4272 int blks_per_bucket
= ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
4273 int num_buckets
= ocfs2_xattr_buckets_per_cluster(osb
);
4274 struct ocfs2_xattr_bucket
*old_first
, *new_first
;
4276 mlog(0, "mv xattrs from cluster %llu to %llu\n",
4277 (unsigned long long)last_blk
, (unsigned long long)to_blk
);
4279 BUG_ON(start_bucket
>= num_buckets
);
4281 num_buckets
-= start_bucket
;
4282 last_blk
+= (start_bucket
* blks_per_bucket
);
4285 /* The first bucket of the original extent */
4286 old_first
= ocfs2_xattr_bucket_new(inode
);
4287 /* The first bucket of the new extent */
4288 new_first
= ocfs2_xattr_bucket_new(inode
);
4289 if (!old_first
|| !new_first
) {
4295 ret
= ocfs2_read_xattr_bucket(old_first
, src_blk
);
4302 * We need to update the first bucket of the old extent and all
4303 * the buckets going to the new extent.
4305 credits
= ((num_buckets
+ 1) * blks_per_bucket
) +
4306 handle
->h_buffer_credits
;
4307 ret
= ocfs2_extend_trans(handle
, credits
);
4313 ret
= ocfs2_xattr_bucket_journal_access(handle
, old_first
,
4314 OCFS2_JOURNAL_ACCESS_WRITE
);
4320 for (i
= 0; i
< num_buckets
; i
++) {
4321 ret
= ocfs2_cp_xattr_bucket(inode
, handle
,
4322 last_blk
+ (i
* blks_per_bucket
),
4323 to_blk
+ (i
* blks_per_bucket
),
4332 * Get the new bucket ready before we dirty anything
4333 * (This actually shouldn't fail, because we already dirtied
4334 * it once in ocfs2_cp_xattr_bucket()).
4336 ret
= ocfs2_read_xattr_bucket(new_first
, to_blk
);
4341 ret
= ocfs2_xattr_bucket_journal_access(handle
, new_first
,
4342 OCFS2_JOURNAL_ACCESS_WRITE
);
4348 /* Now update the headers */
4349 le16_add_cpu(&bucket_xh(old_first
)->xh_num_buckets
, -num_buckets
);
4350 ocfs2_xattr_bucket_journal_dirty(handle
, old_first
);
4352 bucket_xh(new_first
)->xh_num_buckets
= cpu_to_le16(num_buckets
);
4353 ocfs2_xattr_bucket_journal_dirty(handle
, new_first
);
4356 *first_hash
= le32_to_cpu(bucket_xh(new_first
)->xh_entries
[0].xe_name_hash
);
4359 ocfs2_xattr_bucket_free(new_first
);
4360 ocfs2_xattr_bucket_free(old_first
);
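/*
 * Illustrative sketch (assumption): the journal credit extension above
 * reserves one bucket's worth of blocks for the first bucket of the old
 * extent plus one bucket's worth for every bucket copied to the new
 * extent.  Stand-alone form of that arithmetic; the helper name is
 * hypothetical.
 */
static inline int ocfs2_mv_buckets_credits_sketch(int num_buckets,
						  int blks_per_bucket,
						  int current_credits)
{
	/* (buckets to copy + the old extent's first bucket) blocks */
	return (num_buckets + 1) * blks_per_bucket + current_credits;
}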
/*
 * Move some xattrs in this cluster to the new cluster.
 * This function should only be called when bucket size == cluster size.
 * Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead.
 */
static int ocfs2_divide_xattr_cluster(struct inode *inode,
				      handle_t *handle,
				      u64 prev_blk,
				      u64 new_blk,
				      u32 *first_hash)
{
	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
	int ret, credits = 2 * blk_per_bucket + handle->h_buffer_credits;

	BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);

	ret = ocfs2_extend_trans(handle, credits);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/* Move half of the xattrs in prev_blk to the next bucket. */
	return ocfs2_divide_xattr_bucket(inode, handle, prev_blk,
					 new_blk, first_hash, 1);
}
/*
 * Move some xattrs from the old cluster to the new one since they are not
 * contiguous in the ocfs2 xattr tree.
 *
 * new_blk starts a new separate cluster, and we will move some xattrs from
 * prev_blk to it.  v_start will be set as the first name hash value in this
 * new cluster so that it can be used as e_cpos during tree insertion and
 * won't collide with our original b-tree operations.  first_bh and header_bh
 * will also be updated since they will be used in ocfs2_extend_xattr_bucket
 * to extend the insert bucket.
 *
 * The problem is how many xattrs should we move to the new one and when
 * should we update first_bh and header_bh?
 * 1. If cluster size > bucket size, that means the previous cluster has more
 *    than one bucket, so just move half the buckets into the new cluster and
 *    update first_bh and header_bh if the insert bucket has been moved
 *    to the new cluster.
 * 2. If cluster_size == bucket_size:
 *    a) If the previous extent rec has more than one cluster and the insert
 *       place isn't in the last cluster, copy the entire last cluster to the
 *       new one.  This time, we don't need to update first_bh and header_bh
 *       since they will not be moved into the new cluster.
 *    b) Otherwise, move the bottom half of the xattrs in the last cluster
 *       into the new one.  And we set the extend flag to zero if the insert
 *       place is moved into the newly allocated cluster, since no extend is
 *       needed.
 */
4417 static int ocfs2_adjust_xattr_cross_cluster(struct inode
*inode
,
4419 struct ocfs2_xattr_bucket
*first
,
4420 struct ocfs2_xattr_bucket
*target
,
4428 mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n",
4429 (unsigned long long)bucket_blkno(first
), prev_clusters
,
4430 (unsigned long long)new_blk
);
4432 if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode
->i_sb
)) > 1) {
4433 ret
= ocfs2_mv_xattr_bucket_cross_cluster(inode
,
4442 /* The start of the last cluster in the first extent */
4443 u64 last_blk
= bucket_blkno(first
) +
4444 ((prev_clusters
- 1) *
4445 ocfs2_clusters_to_blocks(inode
->i_sb
, 1));
4447 if (prev_clusters
> 1 && bucket_blkno(target
) != last_blk
) {
4448 ret
= ocfs2_mv_xattr_buckets(inode
, handle
,
4449 bucket_blkno(first
),
4450 last_blk
, new_blk
, 0,
4455 ret
= ocfs2_divide_xattr_cluster(inode
, handle
,
4461 if ((bucket_blkno(target
) == last_blk
) && extend
)
/*
 * Add a new cluster for xattr storage.
 *
 * If the new cluster is contiguous with the previous one, it will be
 * appended to the same extent record, and num_clusters will be updated.
 * If not, we will insert a new extent for it and move some xattrs in
 * the last cluster into the new allocated one.
 * We also need to limit the maximum size of a btree leaf, otherwise we'll
 * lose the benefits of hashing because we'll have to search large leaves.
 * So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE (or the
 * clustersize, if it is bigger).
 *
 * first_bh is the first block of the previous extent rec and header_bh
 * indicates the bucket we will insert the new xattrs into.  They will be
 * updated when header_bh is moved into the new cluster.
 */
4485 static int ocfs2_add_new_xattr_cluster(struct inode
*inode
,
4486 struct buffer_head
*root_bh
,
4487 struct ocfs2_xattr_bucket
*first
,
4488 struct ocfs2_xattr_bucket
*target
,
4492 struct ocfs2_xattr_set_ctxt
*ctxt
)
4495 u16 bpc
= ocfs2_clusters_to_blocks(inode
->i_sb
, 1);
4496 u32 prev_clusters
= *num_clusters
;
4497 u32 clusters_to_add
= 1, bit_off
, num_bits
, v_start
= 0;
4499 handle_t
*handle
= ctxt
->handle
;
4500 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
4501 struct ocfs2_extent_tree et
;
4503 mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, "
4504 "previous xattr blkno = %llu\n",
4505 (unsigned long long)OCFS2_I(inode
)->ip_blkno
,
4506 prev_cpos
, (unsigned long long)bucket_blkno(first
));
4508 ocfs2_init_xattr_tree_extent_tree(&et
, INODE_CACHE(inode
), root_bh
);
4510 ret
= ocfs2_journal_access_xb(handle
, INODE_CACHE(inode
), root_bh
,
4511 OCFS2_JOURNAL_ACCESS_WRITE
);
4517 ret
= __ocfs2_claim_clusters(osb
, handle
, ctxt
->data_ac
, 1,
4518 clusters_to_add
, &bit_off
, &num_bits
);
4525 BUG_ON(num_bits
> clusters_to_add
);
4527 block
= ocfs2_clusters_to_blocks(osb
->sb
, bit_off
);
4528 mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n",
4529 num_bits
, bit_off
, (unsigned long long)OCFS2_I(inode
)->ip_blkno
);
4531 if (bucket_blkno(first
) + (prev_clusters
* bpc
) == block
&&
4532 (prev_clusters
+ num_bits
) << osb
->s_clustersize_bits
<=
4533 OCFS2_MAX_XATTR_TREE_LEAF_SIZE
) {
4535 * If this cluster is contiguous with the old one and
4536 * adding this new cluster, we don't surpass the limit of
4537 * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, cool. We will let it be
4538 * initialized and used like other buckets in the previous
4540 * So add it as a contiguous one. The caller will handle
4543 v_start
= prev_cpos
+ prev_clusters
;
4544 *num_clusters
= prev_clusters
+ num_bits
;
4545 mlog(0, "Add contiguous %u clusters to previous extent rec.\n",
4548 ret
= ocfs2_adjust_xattr_cross_cluster(inode
,
4562 mlog(0, "Insert %u clusters at block %llu for xattr at %u\n",
4563 num_bits
, (unsigned long long)block
, v_start
);
4564 ret
= ocfs2_insert_extent(handle
, &et
, v_start
, block
,
4565 num_bits
, 0, ctxt
->meta_ac
);
4571 ret
= ocfs2_journal_dirty(handle
, root_bh
);
/*
 * We are given an extent.  'first' is the bucket at the very front of
 * the extent.  The extent has space for an additional bucket past
 * bucket_xh(first)->xh_num_buckets.  'target_blkno' is the block number
 * of the target bucket.  We wish to shift every bucket past the target
 * down one, filling in that additional space.  When we get back to the
 * target, we split the target between itself and the now-empty bucket
 * at target+1 (aka, target_blkno + blks_per_bucket).
 */
4588 static int ocfs2_extend_xattr_bucket(struct inode
*inode
,
4590 struct ocfs2_xattr_bucket
*first
,
4595 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
4596 u16 blk_per_bucket
= ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
4598 u16 new_bucket
= le16_to_cpu(bucket_xh(first
)->xh_num_buckets
);
4600 mlog(0, "extend xattr bucket in %llu, xattr extend rec starting "
4601 "from %llu, len = %u\n", (unsigned long long)target_blk
,
4602 (unsigned long long)bucket_blkno(first
), num_clusters
);
4604 /* The extent must have room for an additional bucket */
4605 BUG_ON(new_bucket
>=
4606 (num_clusters
* ocfs2_xattr_buckets_per_cluster(osb
)));
4608 /* end_blk points to the last existing bucket */
4609 end_blk
= bucket_blkno(first
) + ((new_bucket
- 1) * blk_per_bucket
);
4612 * end_blk is the start of the last existing bucket.
4613 * Thus, (end_blk - target_blk) covers the target bucket and
4614 * every bucket after it up to, but not including, the last
4615 * existing bucket. Then we add the last existing bucket, the
4616 * new bucket, and the first bucket (3 * blk_per_bucket).
4618 credits
= (end_blk
- target_blk
) + (3 * blk_per_bucket
) +
4619 handle
->h_buffer_credits
;
4620 ret
= ocfs2_extend_trans(handle
, credits
);
4626 ret
= ocfs2_xattr_bucket_journal_access(handle
, first
,
4627 OCFS2_JOURNAL_ACCESS_WRITE
);
4633 while (end_blk
!= target_blk
) {
4634 ret
= ocfs2_cp_xattr_bucket(inode
, handle
, end_blk
,
4635 end_blk
+ blk_per_bucket
, 0);
4638 end_blk
-= blk_per_bucket
;
4641 /* Move half of the xattr in target_blkno to the next bucket. */
4642 ret
= ocfs2_divide_xattr_bucket(inode
, handle
, target_blk
,
4643 target_blk
+ blk_per_bucket
, NULL
, 0);
4645 le16_add_cpu(&bucket_xh(first
)->xh_num_buckets
, 1);
4646 ocfs2_xattr_bucket_journal_dirty(handle
, first
);
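/*
 * Illustrative sketch (assumption): the shift above, expressed over a
 * plain array of bucket slots.  Every bucket from the end of the extent
 * down to (but not including) the target moves one slot toward the
 * tail, leaving slot target+1 free for the split.  The array is assumed
 * to have room for one extra slot, just as the extent is checked to
 * have room for one more bucket.
 */
static void ocfs2_shift_buckets_down_sketch(u64 *slot, int last, int target)
{
	int i;

	for (i = last; i > target; i--)
		slot[i + 1] = slot[i];	/* mirrors ocfs2_cp_xattr_bucket() */
}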
/*
 * Add a new xattr bucket in an extent record and adjust the buckets
 * accordingly.  xb_bh is the ocfs2_xattr_block, and target is the
 * bucket we want to insert into.
 *
 * In the easy case, we will move all the buckets after target down by
 * one.  Half of target's xattrs will be moved to the next bucket.
 *
 * If the current cluster is full, we'll allocate a new one.  This may not
 * be contiguous.  The underlying calls will make sure that there is
 * space for the insert, shifting buckets around if necessary.
 * 'target' may be moved by those calls.
 */
4665 static int ocfs2_add_new_xattr_bucket(struct inode
*inode
,
4666 struct buffer_head
*xb_bh
,
4667 struct ocfs2_xattr_bucket
*target
,
4668 struct ocfs2_xattr_set_ctxt
*ctxt
)
4670 struct ocfs2_xattr_block
*xb
=
4671 (struct ocfs2_xattr_block
*)xb_bh
->b_data
;
4672 struct ocfs2_xattr_tree_root
*xb_root
= &xb
->xb_attrs
.xb_root
;
4673 struct ocfs2_extent_list
*el
= &xb_root
->xt_list
;
4675 le32_to_cpu(bucket_xh(target
)->xh_entries
[0].xe_name_hash
);
4676 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
4677 int ret
, num_buckets
, extend
= 1;
4679 u32 e_cpos
, num_clusters
;
4680 /* The bucket at the front of the extent */
4681 struct ocfs2_xattr_bucket
*first
;
4683 mlog(0, "Add new xattr bucket starting from %llu\n",
4684 (unsigned long long)bucket_blkno(target
));
4686 /* The first bucket of the original extent */
4687 first
= ocfs2_xattr_bucket_new(inode
);
4694 ret
= ocfs2_xattr_get_rec(inode
, name_hash
, &p_blkno
, &e_cpos
,
4701 ret
= ocfs2_read_xattr_bucket(first
, p_blkno
);
4707 num_buckets
= ocfs2_xattr_buckets_per_cluster(osb
) * num_clusters
;
4708 if (num_buckets
== le16_to_cpu(bucket_xh(first
)->xh_num_buckets
)) {
4710 * This can move first+target if the target bucket moves
4711 * to the new extent.
4713 ret
= ocfs2_add_new_xattr_cluster(inode
,
4728 ret
= ocfs2_extend_xattr_bucket(inode
,
4731 bucket_blkno(target
),
4738 ocfs2_xattr_bucket_free(first
);
static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
					       struct ocfs2_xattr_bucket *bucket,
					       int offs)
{
	int block_off = offs >> inode->i_sb->s_blocksize_bits;

	offs = offs % inode->i_sb->s_blocksize;
	return bucket_block(bucket, block_off) + offs;
}
/*
 * Handle the normal xattr set, including replace, delete and new.
 *
 * Note: "local" indicates the real data's locality.  We can't judge
 * whether the value lives locally in the bucket just by its length.
 */
4759 static void ocfs2_xattr_set_entry_normal(struct inode
*inode
,
4760 struct ocfs2_xattr_info
*xi
,
4761 struct ocfs2_xattr_search
*xs
,
4765 struct ocfs2_xattr_entry
*last
, *xe
;
4766 int name_len
= strlen(xi
->name
);
4767 struct ocfs2_xattr_header
*xh
= xs
->header
;
4768 u16 count
= le16_to_cpu(xh
->xh_count
), start
;
4769 size_t blocksize
= inode
->i_sb
->s_blocksize
;
4771 size_t offs
, size
, new_size
;
4773 last
= &xh
->xh_entries
[count
];
4774 if (!xs
->not_found
) {
4776 offs
= le16_to_cpu(xe
->xe_name_offset
);
4777 if (ocfs2_xattr_is_local(xe
))
4778 size
= OCFS2_XATTR_SIZE(name_len
) +
4779 OCFS2_XATTR_SIZE(le64_to_cpu(xe
->xe_value_size
));
4781 size
= OCFS2_XATTR_SIZE(name_len
) +
4782 OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE
);
	/*
	 * If the new value will be stored outside, xi->value has been
	 * initialized as an empty ocfs2_xattr_value_root, and the same
	 * goes for xi->value_len, so we can set new_size safely here.
	 * See ocfs2_xattr_set_in_bucket.
	 */
4790 new_size
= OCFS2_XATTR_SIZE(name_len
) +
4791 OCFS2_XATTR_SIZE(xi
->value_len
);
4793 le16_add_cpu(&xh
->xh_name_value_len
, -size
);
4795 if (new_size
> size
)
4796 goto set_new_name_value
;
4798 /* Now replace the old value with new one. */
4800 xe
->xe_value_size
= cpu_to_le64(xi
->value_len
);
4802 xe
->xe_value_size
= 0;
4804 val
= ocfs2_xattr_bucket_get_val(inode
,
4806 memset(val
+ OCFS2_XATTR_SIZE(name_len
), 0,
4807 size
- OCFS2_XATTR_SIZE(name_len
));
4808 if (OCFS2_XATTR_SIZE(xi
->value_len
) > 0)
4809 memcpy(val
+ OCFS2_XATTR_SIZE(name_len
),
4810 xi
->value
, xi
->value_len
);
4812 le16_add_cpu(&xh
->xh_name_value_len
, new_size
);
4813 ocfs2_xattr_set_local(xe
, local
);
	/*
	 * Remove the old entry if there is more than one.
	 * We don't remove the last entry so that we can use it to
	 * indicate the hash value of the empty bucket.
	 */
4823 le16_add_cpu(&xh
->xh_count
, -1);
4826 (void *)last
- (void *)xe
);
4828 sizeof(struct ocfs2_xattr_entry
));
4831 cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE
);
4836 /* find a new entry for insert. */
4837 int low
= 0, high
= count
- 1, tmp
;
4838 struct ocfs2_xattr_entry
*tmp_xe
;
4840 while (low
<= high
&& count
) {
4841 tmp
= (low
+ high
) / 2;
4842 tmp_xe
= &xh
->xh_entries
[tmp
];
4844 if (name_hash
> le32_to_cpu(tmp_xe
->xe_name_hash
))
4846 else if (name_hash
<
4847 le32_to_cpu(tmp_xe
->xe_name_hash
))
4855 xe
= &xh
->xh_entries
[low
];
4857 memmove(xe
+ 1, xe
, (void *)last
- (void *)xe
);
4859 le16_add_cpu(&xh
->xh_count
, 1);
4860 memset(xe
, 0, sizeof(struct ocfs2_xattr_entry
));
4861 xe
->xe_name_hash
= cpu_to_le32(name_hash
);
4862 xe
->xe_name_len
= name_len
;
4863 ocfs2_xattr_set_type(xe
, xi
->name_index
);

set_new_name_value:
	/* Insert the new name+value. */
	size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len);

	/*
	 * We must make sure that the name/value pair
	 * exists in the same block.
	 */
	offs = le16_to_cpu(xh->xh_free_start);
	start = offs - size;

	if (start >> inode->i_sb->s_blocksize_bits !=
	    (offs - 1) >> inode->i_sb->s_blocksize_bits) {
		offs = offs - offs % blocksize;
		xh->xh_free_start = cpu_to_le16(offs);
	}

	val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size);
	xe->xe_name_offset = cpu_to_le16(offs - size);

	memset(val, 0, size);
	memcpy(val, xi->name, name_len);
	memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len);

	xe->xe_value_size = cpu_to_le64(xi->value_len);
	ocfs2_xattr_set_local(xe, local);

	le16_add_cpu(&xh->xh_free_start, -size);
	le16_add_cpu(&xh->xh_name_value_len, size);

	return;
}
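
/*
 * A rough worked example of the packing above (the numbers are illustrative
 * only): name/value pairs grow downward from xh_free_start, and both the
 * name and the value are padded by OCFS2_XATTR_SIZE() (4-byte granularity).
 * With a 1KB block size a 4KB bucket spans four blocks; if xh_free_start
 * were 2100 and the new pair needed 200 bytes, the pair would straddle the
 * 2048 boundary, so the code above first rounds xh_free_start down to 2048
 * and then stores the pair at bytes 1848..2047, entirely inside one block.
 */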

/*
 * Set the xattr entry in the specified bucket.
 * The bucket is indicated by xs->bucket and it should have
 * enough space for the xattr insertion.
 */
static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode,
					   handle_t *handle,
					   struct ocfs2_xattr_info *xi,
					   struct ocfs2_xattr_search *xs,
					   u32 name_hash,
					   int local)
{
	int ret;
	u64 blkno;

	mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n",
	     (unsigned long)xi->value_len, xi->name_index,
	     (unsigned long long)bucket_blkno(xs->bucket));

	if (!xs->bucket->bu_bhs[1]) {
		blkno = bucket_blkno(xs->bucket);
		ocfs2_xattr_bucket_relse(xs->bucket);
		ret = ocfs2_read_xattr_bucket(xs->bucket, blkno);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local);
	ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);

out:
	return ret;
}
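
/*
 * Note on the re-read above: the index search may have populated only the
 * first block of the bucket (bu_bhs[1] == NULL), so before journaling and
 * modifying it we drop the partial bucket and re-read every block backing
 * it. The blocks should still be cached, so this is expected to be cheap.
 */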

/*
 * Truncate the specified xe_off entry in the xattr bucket.
 * The bucket is indicated by 'bucket' and len is the new length.
 * Both the ocfs2_xattr_value_root and the entry will be updated here.
 *
 * Copy the new updated xe and xe_value_root to new_xe and new_xv if needed.
 */
4949 static int ocfs2_xattr_bucket_value_truncate(struct inode
*inode
,
4950 struct ocfs2_xattr_bucket
*bucket
,
4953 struct ocfs2_xattr_set_ctxt
*ctxt
)
4957 struct ocfs2_xattr_entry
*xe
;
4958 struct ocfs2_xattr_header
*xh
= bucket_xh(bucket
);
4959 size_t blocksize
= inode
->i_sb
->s_blocksize
;
4960 struct ocfs2_xattr_value_buf vb
= {
4961 .vb_access
= ocfs2_journal_access
,
4964 xe
= &xh
->xh_entries
[xe_off
];
4966 BUG_ON(!xe
|| ocfs2_xattr_is_local(xe
));
4968 offset
= le16_to_cpu(xe
->xe_name_offset
) +
4969 OCFS2_XATTR_SIZE(xe
->xe_name_len
);
4971 value_blk
= offset
/ blocksize
;
	/* We don't allow ocfs2_xattr_value to be stored in a different block. */
4974 BUG_ON(value_blk
!= (offset
+ OCFS2_XATTR_ROOT_SIZE
- 1) / blocksize
);
4976 vb
.vb_bh
= bucket
->bu_bhs
[value_blk
];
4979 vb
.vb_xv
= (struct ocfs2_xattr_value_root
*)
4980 (vb
.vb_bh
->b_data
+ offset
% blocksize
);
	/*
	 * From here on out we have to dirty the bucket.  The generic
	 * value calls only modify one of the bucket's bhs, but we need
	 * to send the bucket at once.  So if they error, they *could* have
	 * modified something.  We have to assume they did, and dirty
	 * the whole bucket.  This leaves us in a consistent state.
	 */
4989 mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n",
4990 xe_off
, (unsigned long long)bucket_blkno(bucket
), len
);
4991 ret
= ocfs2_xattr_value_truncate(inode
, &vb
, len
, ctxt
);
4997 ret
= ocfs2_xattr_bucket_journal_access(ctxt
->handle
, bucket
,
4998 OCFS2_JOURNAL_ACCESS_WRITE
);
5004 xe
->xe_value_size
= cpu_to_le64(len
);
5006 ocfs2_xattr_bucket_journal_dirty(ctxt
->handle
, bucket
);
5012 static int ocfs2_xattr_bucket_value_truncate_xs(struct inode
*inode
,
5013 struct ocfs2_xattr_search
*xs
,
5015 struct ocfs2_xattr_set_ctxt
*ctxt
)
5018 struct ocfs2_xattr_entry
*xe
= xs
->here
;
5019 struct ocfs2_xattr_header
*xh
= (struct ocfs2_xattr_header
*)xs
->base
;
5021 BUG_ON(!xs
->bucket
->bu_bhs
[0] || !xe
|| ocfs2_xattr_is_local(xe
));
5023 offset
= xe
- xh
->xh_entries
;
5024 ret
= ocfs2_xattr_bucket_value_truncate(inode
, xs
->bucket
,
5032 static int ocfs2_xattr_bucket_set_value_outside(struct inode
*inode
,
5034 struct ocfs2_xattr_search
*xs
,
5038 int ret
, offset
, block_off
;
5039 struct ocfs2_xattr_value_root
*xv
;
5040 struct ocfs2_xattr_entry
*xe
= xs
->here
;
5041 struct ocfs2_xattr_header
*xh
= bucket_xh(xs
->bucket
);
5043 struct ocfs2_xattr_value_buf vb
= {
5044 .vb_access
= ocfs2_journal_access
,
5047 BUG_ON(!xs
->base
|| !xe
|| ocfs2_xattr_is_local(xe
));
5049 ret
= ocfs2_xattr_bucket_get_name_value(inode
->i_sb
, xh
,
5050 xe
- xh
->xh_entries
,
5058 base
= bucket_block(xs
->bucket
, block_off
);
5059 xv
= (struct ocfs2_xattr_value_root
*)(base
+ offset
+
5060 OCFS2_XATTR_SIZE(xe
->xe_name_len
));
5063 vb
.vb_bh
= xs
->bucket
->bu_bhs
[block_off
];
5064 ret
= __ocfs2_xattr_set_value_outside(inode
, handle
,
5065 &vb
, val
, value_len
);
5072 static int ocfs2_rm_xattr_cluster(struct inode
*inode
,
5073 struct buffer_head
*root_bh
,
5080 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
5081 struct inode
*tl_inode
= osb
->osb_tl_inode
;
5083 struct ocfs2_xattr_block
*xb
=
5084 (struct ocfs2_xattr_block
*)root_bh
->b_data
;
5085 struct ocfs2_alloc_context
*meta_ac
= NULL
;
5086 struct ocfs2_cached_dealloc_ctxt dealloc
;
5087 struct ocfs2_extent_tree et
;
5089 ret
= ocfs2_iterate_xattr_buckets(inode
, blkno
, len
,
5090 ocfs2_delete_xattr_in_bucket
, para
);
5096 ocfs2_init_xattr_tree_extent_tree(&et
, INODE_CACHE(inode
), root_bh
);
5098 ocfs2_init_dealloc_ctxt(&dealloc
);
5100 mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n",
5101 cpos
, len
, (unsigned long long)blkno
);
5103 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode
), blkno
,
5106 ret
= ocfs2_lock_allocators(inode
, &et
, 0, 1, NULL
, &meta_ac
);
5112 mutex_lock(&tl_inode
->i_mutex
);
5114 if (ocfs2_truncate_log_needs_flush(osb
)) {
5115 ret
= __ocfs2_flush_truncate_log(osb
);
5122 handle
= ocfs2_start_trans(osb
, ocfs2_remove_extent_credits(osb
->sb
));
5123 if (IS_ERR(handle
)) {
5129 ret
= ocfs2_journal_access_xb(handle
, INODE_CACHE(inode
), root_bh
,
5130 OCFS2_JOURNAL_ACCESS_WRITE
);
5136 ret
= ocfs2_remove_extent(handle
, &et
, cpos
, len
, meta_ac
,
5143 le32_add_cpu(&xb
->xb_attrs
.xb_root
.xt_clusters
, -len
);
5145 ret
= ocfs2_journal_dirty(handle
, root_bh
);
5151 ret
= ocfs2_truncate_log_append(osb
, handle
, blkno
, len
);
5156 ocfs2_commit_trans(osb
, handle
);
5158 ocfs2_schedule_truncate_log_flush(osb
, 1);
5160 mutex_unlock(&tl_inode
->i_mutex
);
5163 ocfs2_free_alloc_context(meta_ac
);
5165 ocfs2_run_deallocs(osb
, &dealloc
);
5170 static void ocfs2_xattr_bucket_remove_xs(struct inode
*inode
,
5172 struct ocfs2_xattr_search
*xs
)
5174 struct ocfs2_xattr_header
*xh
= bucket_xh(xs
->bucket
);
5175 struct ocfs2_xattr_entry
*last
= &xh
->xh_entries
[
5176 le16_to_cpu(xh
->xh_count
) - 1];
5179 ret
= ocfs2_xattr_bucket_journal_access(handle
, xs
->bucket
,
5180 OCFS2_JOURNAL_ACCESS_WRITE
);
5186 /* Remove the old entry. */
5187 memmove(xs
->here
, xs
->here
+ 1,
5188 (void *)last
- (void *)xs
->here
);
5189 memset(last
, 0, sizeof(struct ocfs2_xattr_entry
));
5190 le16_add_cpu(&xh
->xh_count
, -1);
5192 ocfs2_xattr_bucket_journal_dirty(handle
, xs
->bucket
);
/*
 * Set the xattr name/value in the bucket specified in xs.
 *
 * As the new value in xi may be stored in the bucket or in an outside cluster,
 * we divide the whole process into 4 steps:
 * 1. Insert the name/value in the bucket (ocfs2_xattr_set_entry_in_bucket).
 * 2. Truncate the old outside storage (ocfs2_xattr_bucket_value_truncate_xs).
 * 3. Set the value in the outside cluster (ocfs2_xattr_bucket_set_value_outside).
 * 4. If the clusters for the new outside value can't be allocated, free the
 *    xattr we allocated in step 1.
 */
5206 static int ocfs2_xattr_set_in_bucket(struct inode
*inode
,
5207 struct ocfs2_xattr_info
*xi
,
5208 struct ocfs2_xattr_search
*xs
,
5209 struct ocfs2_xattr_set_ctxt
*ctxt
)
5213 char *val
= (char *)xi
->value
;
5214 struct ocfs2_xattr_entry
*xe
= xs
->here
;
5215 u32 name_hash
= ocfs2_xattr_name_hash(inode
, xi
->name
,
5218 if (!xs
->not_found
&& !ocfs2_xattr_is_local(xe
)) {
		/*
		 * We need to truncate the xattr storage first.
		 *
		 * If both the old and the new value are stored
		 * outside the block, we only need to truncate
		 * the storage and then set the value outside.
		 *
		 * If the new value should be stored within the block,
		 * we should free all the outside blocks first and
		 * the modification to the xattr block will be done
		 * by the following steps.
		 */
5231 if (xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
)
5232 value_len
= xi
->value_len
;
5236 ret
= ocfs2_xattr_bucket_value_truncate_xs(inode
, xs
,
5243 goto set_value_outside
;
5246 value_len
= xi
->value_len
;
5247 /* So we have to handle the inside block change now. */
5248 if (value_len
> OCFS2_XATTR_INLINE_SIZE
) {
		/*
		 * If the new value will be stored outside of the block,
		 * initialize a new empty value root and insert it first.
		 */
5254 xi
->value
= &def_xv
;
5255 xi
->value_len
= OCFS2_XATTR_ROOT_SIZE
;
5258 ret
= ocfs2_xattr_set_entry_in_bucket(inode
, ctxt
->handle
, xi
, xs
,
5265 if (value_len
<= OCFS2_XATTR_INLINE_SIZE
)
5268 /* allocate the space now for the outside block storage. */
5269 ret
= ocfs2_xattr_bucket_value_truncate_xs(inode
, xs
,
5274 if (xs
->not_found
) {
			/*
			 * We can't allocate enough clusters for outside
			 * storage and we have already allocated the xattr,
			 * so we need to remove it.
			 */
5280 ocfs2_xattr_bucket_remove_xs(inode
, ctxt
->handle
, xs
);
5286 ret
= ocfs2_xattr_bucket_set_value_outside(inode
, ctxt
->handle
,
5287 xs
, val
, value_len
);
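
/*
 * To summarize the flow above: when the new value is larger than
 * OCFS2_XATTR_INLINE_SIZE, xi->value is temporarily pointed at def_xv (an
 * empty ocfs2_xattr_value_root of OCFS2_XATTR_ROOT_SIZE bytes), so the
 * bucket entry only ever stores the value tree root; the clusters for the
 * real value are then reserved by the truncate-to-new-size step and filled
 * in by ocfs2_xattr_bucket_set_value_outside(). If that allocation fails
 * for a freshly inserted entry, the entry is removed again.
 */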
/*
 * Check whether the xattr bucket is filled up with entries of the same hash
 * value. If we want to insert an xattr with that same hash, return -ENOSPC.
 * If we want to insert an xattr with a different hash value, go ahead
 * and ocfs2_divide_xattr_bucket will handle this.
 */
5298 static int ocfs2_check_xattr_bucket_collision(struct inode
*inode
,
5299 struct ocfs2_xattr_bucket
*bucket
,
5302 struct ocfs2_xattr_header
*xh
= bucket_xh(bucket
);
5303 u32 name_hash
= ocfs2_xattr_name_hash(inode
, name
, strlen(name
));
5305 if (name_hash
!= le32_to_cpu(xh
->xh_entries
[0].xe_name_hash
))
5308 if (xh
->xh_entries
[le16_to_cpu(xh
->xh_count
) - 1].xe_name_hash
==
5309 xh
->xh_entries
[0].xe_name_hash
) {
5310 mlog(ML_ERROR
, "Too much hash collision in xattr bucket %llu, "
5312 (unsigned long long)bucket_blkno(bucket
),
5313 le32_to_cpu(xh
->xh_entries
[0].xe_name_hash
));
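
/*
 * In other words: entries in a bucket are kept sorted by xe_name_hash, so
 * the check above only matters when the name being inserted hashes to the
 * same value as the bucket's first entry. If the last entry carries that
 * hash too, the whole bucket is one collision run for that hash, and
 * splitting the bucket cannot make room for another entry with it, hence
 * the -ENOSPC.
 */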
5320 static int ocfs2_xattr_set_entry_index_block(struct inode
*inode
,
5321 struct ocfs2_xattr_info
*xi
,
5322 struct ocfs2_xattr_search
*xs
,
5323 struct ocfs2_xattr_set_ctxt
*ctxt
)
5325 struct ocfs2_xattr_header
*xh
;
5326 struct ocfs2_xattr_entry
*xe
;
5327 u16 count
, header_size
, xh_free_start
;
5328 int free
, max_free
, need
, old
;
5329 size_t value_size
= 0, name_len
= strlen(xi
->name
);
5330 size_t blocksize
= inode
->i_sb
->s_blocksize
;
5331 int ret
, allocation
= 0;
5333 mlog_entry("Set xattr %s in xattr index block\n", xi
->name
);
5337 count
= le16_to_cpu(xh
->xh_count
);
5338 xh_free_start
= le16_to_cpu(xh
->xh_free_start
);
5339 header_size
= sizeof(struct ocfs2_xattr_header
) +
5340 count
* sizeof(struct ocfs2_xattr_entry
);
5341 max_free
= OCFS2_XATTR_BUCKET_SIZE
- header_size
-
5342 le16_to_cpu(xh
->xh_name_value_len
) - OCFS2_XATTR_HEADER_GAP
;
5344 mlog_bug_on_msg(header_size
> blocksize
, "bucket %llu has header size "
5345 "of %u which exceed block size\n",
5346 (unsigned long long)bucket_blkno(xs
->bucket
),
5349 if (xi
->value
&& xi
->value_len
> OCFS2_XATTR_INLINE_SIZE
)
5350 value_size
= OCFS2_XATTR_ROOT_SIZE
;
5352 value_size
= OCFS2_XATTR_SIZE(xi
->value_len
);
5355 need
= sizeof(struct ocfs2_xattr_entry
) +
5356 OCFS2_XATTR_SIZE(name_len
) + value_size
;
5358 need
= value_size
+ OCFS2_XATTR_SIZE(name_len
);
		/*
		 * We only replace the old value if the new length is smaller
		 * than the old one. Otherwise we will allocate new space in the
		 * bucket to store it.
		 */
5366 if (ocfs2_xattr_is_local(xe
))
5367 old
= OCFS2_XATTR_SIZE(le64_to_cpu(xe
->xe_value_size
));
5369 old
= OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE
);
5371 if (old
>= value_size
)
5375 free
= xh_free_start
- header_size
- OCFS2_XATTR_HEADER_GAP
;
	/*
	 * We need to make sure the new name/value pair
	 * can exist in the same block.
	 */
5380 if (xh_free_start
% blocksize
< need
)
5381 free
-= xh_free_start
% blocksize
;
5383 mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, "
5384 "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len ="
5385 " %u\n", xs
->not_found
,
5386 (unsigned long long)bucket_blkno(xs
->bucket
),
5387 free
, need
, max_free
, le16_to_cpu(xh
->xh_free_start
),
5388 le16_to_cpu(xh
->xh_name_value_len
));
5392 count
== ocfs2_xattr_max_xe_in_bucket(inode
->i_sb
))) {
5393 if (need
<= max_free
&&
5394 count
< ocfs2_xattr_max_xe_in_bucket(inode
->i_sb
)) {
			/*
			 * We can create the space by defragmenting. Since only the
			 * name/value will be moved, the xe shouldn't be changed
			 * in this process.
			 */
5400 ret
= ocfs2_defrag_xattr_bucket(inode
, ctxt
->handle
,
5407 xh_free_start
= le16_to_cpu(xh
->xh_free_start
);
5408 free
= xh_free_start
- header_size
5409 - OCFS2_XATTR_HEADER_GAP
;
5410 if (xh_free_start
% blocksize
< need
)
5411 free
-= xh_free_start
% blocksize
;
5416 mlog(0, "Can't get enough space for xattr insert by "
5417 "defragment. Need %u bytes, but we have %d, so "
5418 "allocate new bucket for it.\n", need
, free
);
		/*
		 * We have to add new buckets or clusters and one
		 * allocation should leave us enough space for insert.
		 */
		BUG_ON(allocation);

		/*
		 * We do not allow for overlapping ranges between buckets. And
		 * the maximum number of collisions we will allow for then is
		 * one bucket's worth, so check here whether we need to
		 * add a new bucket for the insert.
		 */
5433 ret
= ocfs2_check_xattr_bucket_collision(inode
,
5441 ret
= ocfs2_add_new_xattr_bucket(inode
,
		/*
		 * ocfs2_add_new_xattr_bucket() will have updated
		 * xs->bucket if it moved, but it will not have updated
		 * any of the other search fields. Thus, we drop it and
		 * re-search. Everything should be cached, so it'll be
		 * quick.
		 */
5457 ocfs2_xattr_bucket_relse(xs
->bucket
);
5458 ret
= ocfs2_xattr_index_block_find(inode
, xs
->xattr_bh
,
5461 if (ret
&& ret
!= -ENODATA
)
5463 xs
->not_found
= ret
;
5469 ret
= ocfs2_xattr_set_in_bucket(inode
, xi
, xs
, ctxt
);
5475 static int ocfs2_delete_xattr_in_bucket(struct inode
*inode
,
5476 struct ocfs2_xattr_bucket
*bucket
,
5479 int ret
= 0, ref_credits
;
5480 struct ocfs2_xattr_header
*xh
= bucket_xh(bucket
);
5482 struct ocfs2_xattr_entry
*xe
;
5483 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
5484 struct ocfs2_xattr_set_ctxt ctxt
= {NULL
, NULL
,};
5485 int credits
= ocfs2_remove_extent_credits(osb
->sb
) +
5486 ocfs2_blocks_per_xattr_bucket(inode
->i_sb
);
5487 struct ocfs2_xattr_value_root
*xv
;
5488 struct ocfs2_rm_xattr_bucket_para
*args
=
5489 (struct ocfs2_rm_xattr_bucket_para
*)para
;
5491 ocfs2_init_dealloc_ctxt(&ctxt
.dealloc
);
5493 for (i
= 0; i
< le16_to_cpu(xh
->xh_count
); i
++) {
5494 xe
= &xh
->xh_entries
[i
];
5495 if (ocfs2_xattr_is_local(xe
))
5498 ret
= ocfs2_get_xattr_tree_value_root(inode
->i_sb
, bucket
,
5501 ret
= ocfs2_lock_xattr_remove_allocators(inode
, xv
,
5507 ctxt
.handle
= ocfs2_start_trans(osb
, credits
+ ref_credits
);
5508 if (IS_ERR(ctxt
.handle
)) {
5509 ret
= PTR_ERR(ctxt
.handle
);
5514 ret
= ocfs2_xattr_bucket_value_truncate(inode
, bucket
,
5517 ocfs2_commit_trans(osb
, ctxt
.handle
);
5519 ocfs2_free_alloc_context(ctxt
.meta_ac
);
5520 ctxt
.meta_ac
= NULL
;
5529 ocfs2_free_alloc_context(ctxt
.meta_ac
);
5530 ocfs2_schedule_truncate_log_flush(osb
, 1);
5531 ocfs2_run_deallocs(osb
, &ctxt
.dealloc
);
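
/*
 * A note on the transaction pattern above: each non-local entry gets its
 * own small transaction, sized as remove-extent credits plus one credit per
 * block of the bucket (plus any refcount credits), and its outside value is
 * truncated away before the transaction is committed. Handling one entry at
 * a time keeps the credit estimate simple and bounded, at the cost of more
 * commits per bucket.
 */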
/*
 * Whenever we modify an xattr value root in the bucket (e.g., CoW
 * or changing the extent record flag), we need to recalculate
 * the metaecc for the whole bucket. So it is done here.
 *
 * Note: the caller has to provide the extra journal credits for this.
 */
5543 static int ocfs2_xattr_bucket_post_refcount(struct inode
*inode
,
5548 struct ocfs2_xattr_bucket
*bucket
=
5549 (struct ocfs2_xattr_bucket
*)para
;
5551 ret
= ocfs2_xattr_bucket_journal_access(handle
, bucket
,
5552 OCFS2_JOURNAL_ACCESS_WRITE
);
5558 ocfs2_xattr_bucket_journal_dirty(handle
, bucket
);
/*
 * Special handling we need if the xattr value is refcounted.
 *
 * 1. If the xattr is refcounted, lock the tree.
 * 2. CoW the xattr if we are setting a new value and the value
 *    will be stored outside.
 * 3. In any other case, decrease_refcount will work for us, so just
 *    locking the refcount tree and calculating the meta and credits is OK.
 *
 * We have to do the CoW before ocfs2_init_xattr_set_ctxt since
 * currently CoW is a complete transaction, while this function
 * will also lock the allocators and could deadlock us. So we
 * CoW the whole xattr value up front.
 */
5577 static int ocfs2_prepare_refcount_xattr(struct inode
*inode
,
5578 struct ocfs2_dinode
*di
,
5579 struct ocfs2_xattr_info
*xi
,
5580 struct ocfs2_xattr_search
*xis
,
5581 struct ocfs2_xattr_search
*xbs
,
5582 struct ocfs2_refcount_tree
**ref_tree
,
5587 struct ocfs2_xattr_block
*xb
;
5588 struct ocfs2_xattr_entry
*xe
;
5590 u32 p_cluster
, num_clusters
;
5591 unsigned int ext_flags
;
5592 int name_offset
, name_len
;
5593 struct ocfs2_xattr_value_buf vb
;
5594 struct ocfs2_xattr_bucket
*bucket
= NULL
;
5595 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
5596 struct ocfs2_post_refcount refcount
;
5597 struct ocfs2_post_refcount
*p
= NULL
;
5598 struct buffer_head
*ref_root_bh
= NULL
;
5600 if (!xis
->not_found
) {
5602 name_offset
= le16_to_cpu(xe
->xe_name_offset
);
5603 name_len
= OCFS2_XATTR_SIZE(xe
->xe_name_len
);
5605 vb
.vb_bh
= xis
->inode_bh
;
5606 vb
.vb_access
= ocfs2_journal_access_di
;
5608 int i
, block_off
= 0;
5609 xb
= (struct ocfs2_xattr_block
*)xbs
->xattr_bh
->b_data
;
5611 name_offset
= le16_to_cpu(xe
->xe_name_offset
);
5612 name_len
= OCFS2_XATTR_SIZE(xe
->xe_name_len
);
5613 i
= xbs
->here
- xbs
->header
->xh_entries
;
5615 if (le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
) {
5616 ret
= ocfs2_xattr_bucket_get_name_value(inode
->i_sb
,
5617 bucket_xh(xbs
->bucket
),
5624 base
= bucket_block(xbs
->bucket
, block_off
);
5625 vb
.vb_bh
= xbs
->bucket
->bu_bhs
[block_off
];
5626 vb
.vb_access
= ocfs2_journal_access
;
5628 if (ocfs2_meta_ecc(osb
)) {
5629 /*create parameters for ocfs2_post_refcount. */
5630 bucket
= xbs
->bucket
;
5631 refcount
.credits
= bucket
->bu_blocks
;
5632 refcount
.para
= bucket
;
5634 ocfs2_xattr_bucket_post_refcount
;
5639 vb
.vb_bh
= xbs
->xattr_bh
;
5640 vb
.vb_access
= ocfs2_journal_access_xb
;
5644 if (ocfs2_xattr_is_local(xe
))
5647 vb
.vb_xv
= (struct ocfs2_xattr_value_root
*)
5648 (base
+ name_offset
+ name_len
);
5650 ret
= ocfs2_xattr_get_clusters(inode
, 0, &p_cluster
,
5651 &num_clusters
, &vb
.vb_xv
->xr_list
,
	/*
	 * We just need to check the 1st extent record, since we always
	 * CoW the whole xattr. So there shouldn't be an xattr with
	 * some REFCOUNT extent recs after the 1st one.
	 */
5663 if (!(ext_flags
& OCFS2_EXT_REFCOUNTED
))
5666 ret
= ocfs2_lock_refcount_tree(osb
, le64_to_cpu(di
->i_refcount_loc
),
5667 1, ref_tree
, &ref_root_bh
);
	/*
	 * If we are deleting the xattr or the new size will be stored inside,
	 * cool, leave it there, the xattr truncate process will remove them
	 * for us (it still needs the refcount tree lock and the meta, credits).
	 * The worst case is that every cluster truncate will split the
	 * refcount tree, and make the original extent become 3. So we will need
	 * at most 2 * clusters more extent recs.
	 */
5681 if (!xi
->value
|| xi
->value_len
<= OCFS2_XATTR_INLINE_SIZE
) {
5683 ret
= ocfs2_refcounted_xattr_delete_need(inode
,
5684 &(*ref_tree
)->rf_ci
,
5685 ref_root_bh
, vb
.vb_xv
,
5692 ret
= ocfs2_refcount_cow_xattr(inode
, di
, &vb
,
5693 *ref_tree
, ref_root_bh
, 0,
5694 le32_to_cpu(vb
.vb_xv
->xr_clusters
), p
);
5699 brelse(ref_root_bh
);
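
/*
 * Summary of the decision made above: the value is CoWed here only when a
 * new value is being set and it will live outside the block; for deletes
 * and for values that will fit inside, it is enough to take the refcount
 * tree lock and reserve the metadata/credits that the later truncate will
 * need, per the worst-case estimate in the comment above.
 */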
/*
 * Add the REFCOUNTED flag to all the extent recs in the ocfs2_xattr_value_root.
 * The physical clusters will be added to the refcount tree.
 */
5707 static int ocfs2_xattr_value_attach_refcount(struct inode
*inode
,
5708 struct ocfs2_xattr_value_root
*xv
,
5709 struct ocfs2_extent_tree
*value_et
,
5710 struct ocfs2_caching_info
*ref_ci
,
5711 struct buffer_head
*ref_root_bh
,
5712 struct ocfs2_cached_dealloc_ctxt
*dealloc
,
5713 struct ocfs2_post_refcount
*refcount
)
5716 u32 clusters
= le32_to_cpu(xv
->xr_clusters
);
5717 u32 cpos
, p_cluster
, num_clusters
;
5718 struct ocfs2_extent_list
*el
= &xv
->xr_list
;
5719 unsigned int ext_flags
;
5722 while (cpos
< clusters
) {
5723 ret
= ocfs2_xattr_get_clusters(inode
, cpos
, &p_cluster
,
5724 &num_clusters
, el
, &ext_flags
);
5726 cpos
+= num_clusters
;
5727 if ((ext_flags
& OCFS2_EXT_REFCOUNTED
))
5732 ret
= ocfs2_add_refcount_flag(inode
, value_et
,
5733 ref_ci
, ref_root_bh
,
5734 cpos
- num_clusters
,
5735 p_cluster
, num_clusters
,
/*
 * Given a normal ocfs2_xattr_header, refcount all the entries which
 * have their value stored outside.
 * Used for xattrs stored in the inode and in an ocfs2_xattr_block.
 */
5751 static int ocfs2_xattr_attach_refcount_normal(struct inode
*inode
,
5752 struct ocfs2_xattr_value_buf
*vb
,
5753 struct ocfs2_xattr_header
*header
,
5754 struct ocfs2_caching_info
*ref_ci
,
5755 struct buffer_head
*ref_root_bh
,
5756 struct ocfs2_cached_dealloc_ctxt
*dealloc
)
5759 struct ocfs2_xattr_entry
*xe
;
5760 struct ocfs2_xattr_value_root
*xv
;
5761 struct ocfs2_extent_tree et
;
5764 for (i
= 0; i
< le16_to_cpu(header
->xh_count
); i
++) {
5765 xe
= &header
->xh_entries
[i
];
5767 if (ocfs2_xattr_is_local(xe
))
5770 xv
= (struct ocfs2_xattr_value_root
*)((void *)header
+
5771 le16_to_cpu(xe
->xe_name_offset
) +
5772 OCFS2_XATTR_SIZE(xe
->xe_name_len
));
5775 ocfs2_init_xattr_value_extent_tree(&et
, INODE_CACHE(inode
), vb
);
5777 ret
= ocfs2_xattr_value_attach_refcount(inode
, xv
, &et
,
5778 ref_ci
, ref_root_bh
,
5789 static int ocfs2_xattr_inline_attach_refcount(struct inode
*inode
,
5790 struct buffer_head
*fe_bh
,
5791 struct ocfs2_caching_info
*ref_ci
,
5792 struct buffer_head
*ref_root_bh
,
5793 struct ocfs2_cached_dealloc_ctxt
*dealloc
)
5795 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)fe_bh
->b_data
;
5796 struct ocfs2_xattr_header
*header
= (struct ocfs2_xattr_header
*)
5797 (fe_bh
->b_data
+ inode
->i_sb
->s_blocksize
-
5798 le16_to_cpu(di
->i_xattr_inline_size
));
5799 struct ocfs2_xattr_value_buf vb
= {
5801 .vb_access
= ocfs2_journal_access_di
,
5804 return ocfs2_xattr_attach_refcount_normal(inode
, &vb
, header
,
5805 ref_ci
, ref_root_bh
, dealloc
);
5808 struct ocfs2_xattr_tree_value_refcount_para
{
5809 struct ocfs2_caching_info
*ref_ci
;
5810 struct buffer_head
*ref_root_bh
;
5811 struct ocfs2_cached_dealloc_ctxt
*dealloc
;
5814 static int ocfs2_get_xattr_tree_value_root(struct super_block
*sb
,
5815 struct ocfs2_xattr_bucket
*bucket
,
5817 struct ocfs2_xattr_value_root
**xv
,
5818 struct buffer_head
**bh
)
5820 int ret
, block_off
, name_offset
;
5821 struct ocfs2_xattr_header
*xh
= bucket_xh(bucket
);
5822 struct ocfs2_xattr_entry
*xe
= &xh
->xh_entries
[offset
];
5825 ret
= ocfs2_xattr_bucket_get_name_value(sb
,
5835 base
= bucket_block(bucket
, block_off
);
5837 *xv
= (struct ocfs2_xattr_value_root
*)(base
+ name_offset
+
5838 OCFS2_XATTR_SIZE(xe
->xe_name_len
));
5841 *bh
= bucket
->bu_bhs
[block_off
];
/*
 * For a given xattr bucket, refcount all the entries which
 * have their value stored outside.
 */
5850 static int ocfs2_xattr_bucket_value_refcount(struct inode
*inode
,
5851 struct ocfs2_xattr_bucket
*bucket
,
5855 struct ocfs2_extent_tree et
;
5856 struct ocfs2_xattr_tree_value_refcount_para
*ref
=
5857 (struct ocfs2_xattr_tree_value_refcount_para
*)para
;
5858 struct ocfs2_xattr_header
*xh
=
5859 (struct ocfs2_xattr_header
*)bucket
->bu_bhs
[0]->b_data
;
5860 struct ocfs2_xattr_entry
*xe
;
5861 struct ocfs2_xattr_value_buf vb
= {
5862 .vb_access
= ocfs2_journal_access
,
5864 struct ocfs2_post_refcount refcount
= {
5865 .credits
= bucket
->bu_blocks
,
5867 .func
= ocfs2_xattr_bucket_post_refcount
,
5869 struct ocfs2_post_refcount
*p
= NULL
;
5871 /* We only need post_refcount if we support metaecc. */
5872 if (ocfs2_meta_ecc(OCFS2_SB(inode
->i_sb
)))
5875 mlog(0, "refcount bucket %llu, count = %u\n",
5876 (unsigned long long)bucket_blkno(bucket
),
5877 le16_to_cpu(xh
->xh_count
));
5878 for (i
= 0; i
< le16_to_cpu(xh
->xh_count
); i
++) {
5879 xe
= &xh
->xh_entries
[i
];
5881 if (ocfs2_xattr_is_local(xe
))
5884 ret
= ocfs2_get_xattr_tree_value_root(inode
->i_sb
, bucket
, i
,
5885 &vb
.vb_xv
, &vb
.vb_bh
);
5891 ocfs2_init_xattr_value_extent_tree(&et
,
5892 INODE_CACHE(inode
), &vb
);
5894 ret
= ocfs2_xattr_value_attach_refcount(inode
, vb
.vb_xv
,
5908 static int ocfs2_refcount_xattr_tree_rec(struct inode
*inode
,
5909 struct buffer_head
*root_bh
,
5910 u64 blkno
, u32 cpos
, u32 len
, void *para
)
5912 return ocfs2_iterate_xattr_buckets(inode
, blkno
, len
,
5913 ocfs2_xattr_bucket_value_refcount
,
5917 static int ocfs2_xattr_block_attach_refcount(struct inode
*inode
,
5918 struct buffer_head
*blk_bh
,
5919 struct ocfs2_caching_info
*ref_ci
,
5920 struct buffer_head
*ref_root_bh
,
5921 struct ocfs2_cached_dealloc_ctxt
*dealloc
)
5924 struct ocfs2_xattr_block
*xb
=
5925 (struct ocfs2_xattr_block
*)blk_bh
->b_data
;
5927 if (!(le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
)) {
5928 struct ocfs2_xattr_header
*header
= &xb
->xb_attrs
.xb_header
;
5929 struct ocfs2_xattr_value_buf vb
= {
5931 .vb_access
= ocfs2_journal_access_xb
,
5934 ret
= ocfs2_xattr_attach_refcount_normal(inode
, &vb
, header
,
5935 ref_ci
, ref_root_bh
,
5938 struct ocfs2_xattr_tree_value_refcount_para para
= {
5940 .ref_root_bh
= ref_root_bh
,
5944 ret
= ocfs2_iterate_xattr_index_block(inode
, blk_bh
,
5945 ocfs2_refcount_xattr_tree_rec
,
5952 int ocfs2_xattr_attach_refcount_tree(struct inode
*inode
,
5953 struct buffer_head
*fe_bh
,
5954 struct ocfs2_caching_info
*ref_ci
,
5955 struct buffer_head
*ref_root_bh
,
5956 struct ocfs2_cached_dealloc_ctxt
*dealloc
)
5959 struct ocfs2_inode_info
*oi
= OCFS2_I(inode
);
5960 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)fe_bh
->b_data
;
5961 struct buffer_head
*blk_bh
= NULL
;
5963 if (oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
) {
5964 ret
= ocfs2_xattr_inline_attach_refcount(inode
, fe_bh
,
5965 ref_ci
, ref_root_bh
,
5973 if (!di
->i_xattr_loc
)
5976 ret
= ocfs2_read_xattr_block(inode
, le64_to_cpu(di
->i_xattr_loc
),
5983 ret
= ocfs2_xattr_block_attach_refcount(inode
, blk_bh
, ref_ci
,
5984 ref_root_bh
, dealloc
);
5994 typedef int (should_xattr_reflinked
)(struct ocfs2_xattr_entry
*xe
);
/*
 * Store the information we need during xattr reflink.
 * old_bh and new_bh are the inode bh's of the old and the new inode.
 */
5999 struct ocfs2_xattr_reflink
{
6000 struct inode
*old_inode
;
6001 struct inode
*new_inode
;
6002 struct buffer_head
*old_bh
;
6003 struct buffer_head
*new_bh
;
6004 struct ocfs2_caching_info
*ref_ci
;
6005 struct buffer_head
*ref_root_bh
;
6006 struct ocfs2_cached_dealloc_ctxt
*dealloc
;
6007 should_xattr_reflinked
*xattr_reflinked
;
/*
 * Given an xattr header and an xe offset,
 * return the proper xv and the corresponding bh.
 * xattrs in the inode, block and xattr tree have different implementations.
 */
6015 typedef int (get_xattr_value_root
)(struct super_block
*sb
,
6016 struct buffer_head
*bh
,
6017 struct ocfs2_xattr_header
*xh
,
6019 struct ocfs2_xattr_value_root
**xv
,
6020 struct buffer_head
**ret_bh
,
/*
 * Calculate all the xattr value root metadata stored in this xattr header and
 * the credits we need if we create them from scratch.
 * We use get_xattr_value_root so that all types of xattr containers can use it.
 */
6028 static int ocfs2_value_metas_in_xattr_header(struct super_block
*sb
,
6029 struct buffer_head
*bh
,
6030 struct ocfs2_xattr_header
*xh
,
6031 int *metas
, int *credits
,
6033 get_xattr_value_root
*func
,
6037 struct ocfs2_xattr_value_root
*xv
;
6038 struct ocfs2_xattr_entry
*xe
;
6040 for (i
= 0; i
< le16_to_cpu(xh
->xh_count
); i
++) {
6041 xe
= &xh
->xh_entries
[i
];
6042 if (ocfs2_xattr_is_local(xe
))
6045 ret
= func(sb
, bh
, xh
, i
, &xv
, NULL
, para
);
6051 *metas
+= le16_to_cpu(xv
->xr_list
.l_tree_depth
) *
6052 le16_to_cpu(xv
->xr_list
.l_next_free_rec
);
6054 *credits
+= ocfs2_calc_extend_credits(sb
,
6056 le32_to_cpu(xv
->xr_clusters
));
		/*
		 * If the value is a tree with a nonzero depth, we don't go deep
		 * into the extent blocks, so just calculate a maximum record num.
		 */
6062 if (!xv
->xr_list
.l_tree_depth
)
6063 *num_recs
+= le16_to_cpu(xv
->xr_list
.l_next_free_rec
);
6065 *num_recs
+= ocfs2_clusters_for_bytes(sb
,
6072 /* Used by xattr inode and block to return the right xv and buffer_head. */
6073 static int ocfs2_get_xattr_value_root(struct super_block
*sb
,
6074 struct buffer_head
*bh
,
6075 struct ocfs2_xattr_header
*xh
,
6077 struct ocfs2_xattr_value_root
**xv
,
6078 struct buffer_head
**ret_bh
,
6081 struct ocfs2_xattr_entry
*xe
= &xh
->xh_entries
[offset
];
6083 *xv
= (struct ocfs2_xattr_value_root
*)((void *)xh
+
6084 le16_to_cpu(xe
->xe_name_offset
) +
6085 OCFS2_XATTR_SIZE(xe
->xe_name_len
));
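
/*
 * Layout reminder for the pointer math above: inside a non-indexed
 * container the value root sits immediately after the padded name, i.e. at
 *
 *	(void *)xh + le16_to_cpu(xe->xe_name_offset) +
 *		OCFS2_XATTR_SIZE(xe->xe_name_len)
 *
 * so, for example, a 7-byte name padded to 8 bytes at offset 3000 puts the
 * ocfs2_xattr_value_root at byte 3008. The numbers are purely illustrative.
 */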
/*
 * Lock the meta_ac and calculate how many credits we need for reflinking xattrs.
 * It is only used for inline xattrs and xattr blocks.
 */
6097 static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super
*osb
,
6098 struct ocfs2_xattr_header
*xh
,
6099 struct buffer_head
*ref_root_bh
,
6101 struct ocfs2_alloc_context
**meta_ac
)
6103 int ret
, meta_add
= 0, num_recs
= 0;
6104 struct ocfs2_refcount_block
*rb
=
6105 (struct ocfs2_refcount_block
*)ref_root_bh
->b_data
;
6109 ret
= ocfs2_value_metas_in_xattr_header(osb
->sb
, NULL
, xh
,
6110 &meta_add
, credits
, &num_recs
,
6111 ocfs2_get_xattr_value_root
,
	/*
	 * We need to add/modify num_recs in the refcount tree, so just calculate
	 * an approximate number we need for the refcount tree change.
	 * Sometimes we need to split the tree, and after a split, half of the recs
	 * will be moved to the new block, and a new block can only provide
	 * half the number of recs. So we multiply the new blocks by 2.
	 */
6125 num_recs
= num_recs
/ ocfs2_refcount_recs_per_rb(osb
->sb
) * 2;
6126 meta_add
+= num_recs
;
6127 *credits
+= num_recs
+ num_recs
* OCFS2_EXPAND_REFCOUNT_TREE_CREDITS
;
6128 if (le32_to_cpu(rb
->rf_flags
) & OCFS2_REFCOUNT_TREE_FL
)
6129 *credits
+= le16_to_cpu(rb
->rf_list
.l_tree_depth
) *
6130 le16_to_cpu(rb
->rf_list
.l_next_free_rec
) + 1;
6134 ret
= ocfs2_reserve_new_metadata_blocks(osb
, meta_add
, meta_ac
);
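
/*
 * Worked example of the reservation math above (illustrative numbers): if
 * the header's outside values add up to num_recs = 600 leaf records and a
 * refcount block can carry, say, 250 records, then 600 / 250 * 2 = 4 extra
 * metadata blocks are reserved for possible refcount-tree splits, and the
 * credits grow by those 4 blocks plus 4 * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS,
 * plus credits to walk the existing tree when the root has the TREE_FL set.
 */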
/*
 * Given an xattr header, reflink all the xattrs in this container.
 * It can be used for inode, block and bucket.
 *
 * Before we call this function, the caller must have memcpy'd the xattrs
 * from old_xh to new_xh.
 *
 * If args.xattr_reflinked is set, call it to decide whether the xe should
 * be reflinked or not. If not, remove it from the new xattr header.
 */
6153 static int ocfs2_reflink_xattr_header(handle_t
*handle
,
6154 struct ocfs2_xattr_reflink
*args
,
6155 struct buffer_head
*old_bh
,
6156 struct ocfs2_xattr_header
*xh
,
6157 struct buffer_head
*new_bh
,
6158 struct ocfs2_xattr_header
*new_xh
,
6159 struct ocfs2_xattr_value_buf
*vb
,
6160 struct ocfs2_alloc_context
*meta_ac
,
6161 get_xattr_value_root
*func
,
6165 struct super_block
*sb
= args
->old_inode
->i_sb
;
6166 struct buffer_head
*value_bh
;
6167 struct ocfs2_xattr_entry
*xe
, *last
;
6168 struct ocfs2_xattr_value_root
*xv
, *new_xv
;
6169 struct ocfs2_extent_tree data_et
;
6170 u32 clusters
, cpos
, p_cluster
, num_clusters
;
6171 unsigned int ext_flags
= 0;
6173 mlog(0, "reflink xattr in container %llu, count = %u\n",
6174 (unsigned long long)old_bh
->b_blocknr
, le16_to_cpu(xh
->xh_count
));
6176 last
= &new_xh
->xh_entries
[le16_to_cpu(new_xh
->xh_count
)];
6177 for (i
= 0, j
= 0; i
< le16_to_cpu(xh
->xh_count
); i
++, j
++) {
6178 xe
= &xh
->xh_entries
[i
];
6180 if (args
->xattr_reflinked
&& !args
->xattr_reflinked(xe
)) {
6181 xe
= &new_xh
->xh_entries
[j
];
6183 le16_add_cpu(&new_xh
->xh_count
, -1);
6184 if (new_xh
->xh_count
) {
6186 (void *)last
- (void *)xe
);
6188 sizeof(struct ocfs2_xattr_entry
));
			/*
			 * We don't want j to increase in the next round since
			 * it has already been moved ahead.
			 */
6199 if (ocfs2_xattr_is_local(xe
))
6202 ret
= func(sb
, old_bh
, xh
, i
, &xv
, NULL
, para
);
6208 ret
= func(sb
, new_bh
, new_xh
, j
, &new_xv
, &value_bh
, para
);
		/*
		 * For an xattr which has l_tree_depth = 0, all the extent
		 * recs have already been copied to the new xh with the
		 * appropriate OCFS2_EXT_REFCOUNTED flag; we just need to
		 * increase the refcount in the refcount tree.
		 *
		 * For an xattr which has l_tree_depth > 0, we need
		 * to initialize it to the empty default value root,
		 * and then insert the extents one by one.
		 */
6224 if (xv
->xr_list
.l_tree_depth
) {
6225 memcpy(new_xv
, &def_xv
, sizeof(def_xv
));
6227 vb
->vb_bh
= value_bh
;
6228 ocfs2_init_xattr_value_extent_tree(&data_et
,
6229 INODE_CACHE(args
->new_inode
), vb
);
6232 clusters
= le32_to_cpu(xv
->xr_clusters
);
6234 while (cpos
< clusters
) {
6235 ret
= ocfs2_xattr_get_clusters(args
->old_inode
,
6248 if (xv
->xr_list
.l_tree_depth
) {
6249 ret
= ocfs2_insert_extent(handle
,
6251 ocfs2_clusters_to_blocks(
6252 args
->old_inode
->i_sb
,
6254 num_clusters
, ext_flags
,
6262 ret
= ocfs2_increase_refcount(handle
, args
->ref_ci
,
6264 p_cluster
, num_clusters
,
6265 meta_ac
, args
->dealloc
);
6271 cpos
+= num_clusters
;
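
/*
 * The loop above therefore has two shapes per entry: for a value whose
 * xr_list has no tree depth, the extent records were already copied by the
 * container memcpy and only the refcounts of their clusters are bumped; for
 * a deeper tree, new_xv is first reset to the empty default root (def_xv)
 * and every (p_cluster, num_clusters) chunk is re-inserted into the new
 * value tree before its refcount is increased.
 */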
6279 static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink
*args
)
6281 int ret
= 0, credits
= 0;
6283 struct ocfs2_super
*osb
= OCFS2_SB(args
->old_inode
->i_sb
);
6284 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)args
->old_bh
->b_data
;
6285 int inline_size
= le16_to_cpu(di
->i_xattr_inline_size
);
6286 int header_off
= osb
->sb
->s_blocksize
- inline_size
;
6287 struct ocfs2_xattr_header
*xh
= (struct ocfs2_xattr_header
*)
6288 (args
->old_bh
->b_data
+ header_off
);
6289 struct ocfs2_xattr_header
*new_xh
= (struct ocfs2_xattr_header
*)
6290 (args
->new_bh
->b_data
+ header_off
);
6291 struct ocfs2_alloc_context
*meta_ac
= NULL
;
6292 struct ocfs2_inode_info
*new_oi
;
6293 struct ocfs2_dinode
*new_di
;
6294 struct ocfs2_xattr_value_buf vb
= {
6295 .vb_bh
= args
->new_bh
,
6296 .vb_access
= ocfs2_journal_access_di
,
6299 ret
= ocfs2_reflink_lock_xattr_allocators(osb
, xh
, args
->ref_root_bh
,
6300 &credits
, &meta_ac
);
6306 handle
= ocfs2_start_trans(osb
, credits
);
6307 if (IS_ERR(handle
)) {
6308 ret
= PTR_ERR(handle
);
6313 ret
= ocfs2_journal_access_di(handle
, INODE_CACHE(args
->new_inode
),
6314 args
->new_bh
, OCFS2_JOURNAL_ACCESS_WRITE
);
6320 memcpy(args
->new_bh
->b_data
+ header_off
,
6321 args
->old_bh
->b_data
+ header_off
, inline_size
);
6323 new_di
= (struct ocfs2_dinode
*)args
->new_bh
->b_data
;
6324 new_di
->i_xattr_inline_size
= cpu_to_le16(inline_size
);
6326 ret
= ocfs2_reflink_xattr_header(handle
, args
, args
->old_bh
, xh
,
6327 args
->new_bh
, new_xh
, &vb
, meta_ac
,
6328 ocfs2_get_xattr_value_root
, NULL
);
6334 new_oi
= OCFS2_I(args
->new_inode
);
6335 spin_lock(&new_oi
->ip_lock
);
6336 new_oi
->ip_dyn_features
|= OCFS2_HAS_XATTR_FL
| OCFS2_INLINE_XATTR_FL
;
6337 new_di
->i_dyn_features
= cpu_to_le16(new_oi
->ip_dyn_features
);
6338 spin_unlock(&new_oi
->ip_lock
);
6340 ocfs2_journal_dirty(handle
, args
->new_bh
);
6343 ocfs2_commit_trans(osb
, handle
);
6347 ocfs2_free_alloc_context(meta_ac
);
6351 static int ocfs2_create_empty_xattr_block(struct inode
*inode
,
6352 struct buffer_head
*fe_bh
,
6353 struct buffer_head
**ret_bh
,
6358 struct ocfs2_alloc_context
*meta_ac
;
6359 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
6361 ret
= ocfs2_reserve_new_metadata_blocks(osb
, 1, &meta_ac
);
6367 handle
= ocfs2_start_trans(osb
, OCFS2_XATTR_BLOCK_CREATE_CREDITS
);
6368 if (IS_ERR(handle
)) {
6369 ret
= PTR_ERR(handle
);
6374 mlog(0, "create new xattr block for inode %llu, index = %d\n",
6375 (unsigned long long)fe_bh
->b_blocknr
, indexed
);
6376 ret
= ocfs2_create_xattr_block(handle
, inode
, fe_bh
,
6377 meta_ac
, ret_bh
, indexed
);
6381 ocfs2_commit_trans(osb
, handle
);
6383 ocfs2_free_alloc_context(meta_ac
);
6387 static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink
*args
,
6388 struct buffer_head
*blk_bh
,
6389 struct buffer_head
*new_blk_bh
)
6391 int ret
= 0, credits
= 0;
6393 struct ocfs2_inode_info
*new_oi
= OCFS2_I(args
->new_inode
);
6394 struct ocfs2_dinode
*new_di
;
6395 struct ocfs2_super
*osb
= OCFS2_SB(args
->new_inode
->i_sb
);
6396 int header_off
= offsetof(struct ocfs2_xattr_block
, xb_attrs
.xb_header
);
6397 struct ocfs2_xattr_block
*xb
=
6398 (struct ocfs2_xattr_block
*)blk_bh
->b_data
;
6399 struct ocfs2_xattr_header
*xh
= &xb
->xb_attrs
.xb_header
;
6400 struct ocfs2_xattr_block
*new_xb
=
6401 (struct ocfs2_xattr_block
*)new_blk_bh
->b_data
;
6402 struct ocfs2_xattr_header
*new_xh
= &new_xb
->xb_attrs
.xb_header
;
6403 struct ocfs2_alloc_context
*meta_ac
;
6404 struct ocfs2_xattr_value_buf vb
= {
6405 .vb_bh
= new_blk_bh
,
6406 .vb_access
= ocfs2_journal_access_xb
,
6409 ret
= ocfs2_reflink_lock_xattr_allocators(osb
, xh
, args
->ref_root_bh
,
6410 &credits
, &meta_ac
);
6416 /* One more credits in case we need to add xattr flags in new inode. */
6417 handle
= ocfs2_start_trans(osb
, credits
+ 1);
6418 if (IS_ERR(handle
)) {
6419 ret
= PTR_ERR(handle
);
6424 if (!(new_oi
->ip_dyn_features
& OCFS2_HAS_XATTR_FL
)) {
6425 ret
= ocfs2_journal_access_di(handle
,
6426 INODE_CACHE(args
->new_inode
),
6428 OCFS2_JOURNAL_ACCESS_WRITE
);
6435 ret
= ocfs2_journal_access_xb(handle
, INODE_CACHE(args
->new_inode
),
6436 new_blk_bh
, OCFS2_JOURNAL_ACCESS_WRITE
);
6442 memcpy(new_blk_bh
->b_data
+ header_off
, blk_bh
->b_data
+ header_off
,
6443 osb
->sb
->s_blocksize
- header_off
);
6445 ret
= ocfs2_reflink_xattr_header(handle
, args
, blk_bh
, xh
,
6446 new_blk_bh
, new_xh
, &vb
, meta_ac
,
6447 ocfs2_get_xattr_value_root
, NULL
);
6453 ocfs2_journal_dirty(handle
, new_blk_bh
);
6455 if (!(new_oi
->ip_dyn_features
& OCFS2_HAS_XATTR_FL
)) {
6456 new_di
= (struct ocfs2_dinode
*)args
->new_bh
->b_data
;
6457 spin_lock(&new_oi
->ip_lock
);
6458 new_oi
->ip_dyn_features
|= OCFS2_HAS_XATTR_FL
;
6459 new_di
->i_dyn_features
= cpu_to_le16(new_oi
->ip_dyn_features
);
6460 spin_unlock(&new_oi
->ip_lock
);
6462 ocfs2_journal_dirty(handle
, args
->new_bh
);
6466 ocfs2_commit_trans(osb
, handle
);
6469 ocfs2_free_alloc_context(meta_ac
);
6473 struct ocfs2_reflink_xattr_tree_args
{
6474 struct ocfs2_xattr_reflink
*reflink
;
6475 struct buffer_head
*old_blk_bh
;
6476 struct buffer_head
*new_blk_bh
;
6477 struct ocfs2_xattr_bucket
*old_bucket
;
6478 struct ocfs2_xattr_bucket
*new_bucket
;
/*
 * We have to handle the case that both the old bucket and the new bucket
 * will call this function to get the right ret_bh.
 * So the caller must give us the right bh.
 */
6487 static int ocfs2_get_reflink_xattr_value_root(struct super_block
*sb
,
6488 struct buffer_head
*bh
,
6489 struct ocfs2_xattr_header
*xh
,
6491 struct ocfs2_xattr_value_root
**xv
,
6492 struct buffer_head
**ret_bh
,
6495 struct ocfs2_reflink_xattr_tree_args
*args
=
6496 (struct ocfs2_reflink_xattr_tree_args
*)para
;
6497 struct ocfs2_xattr_bucket
*bucket
;
6499 if (bh
== args
->old_bucket
->bu_bhs
[0])
6500 bucket
= args
->old_bucket
;
6502 bucket
= args
->new_bucket
;
6504 return ocfs2_get_xattr_tree_value_root(sb
, bucket
, offset
,
6508 struct ocfs2_value_tree_metas
{
6514 static int ocfs2_value_tree_metas_in_bucket(struct super_block
*sb
,
6515 struct buffer_head
*bh
,
6516 struct ocfs2_xattr_header
*xh
,
6518 struct ocfs2_xattr_value_root
**xv
,
6519 struct buffer_head
**ret_bh
,
6522 struct ocfs2_xattr_bucket
*bucket
=
6523 (struct ocfs2_xattr_bucket
*)para
;
6525 return ocfs2_get_xattr_tree_value_root(sb
, bucket
, offset
,
6529 static int ocfs2_calc_value_tree_metas(struct inode
*inode
,
6530 struct ocfs2_xattr_bucket
*bucket
,
6533 struct ocfs2_value_tree_metas
*metas
=
6534 (struct ocfs2_value_tree_metas
*)para
;
6535 struct ocfs2_xattr_header
*xh
=
6536 (struct ocfs2_xattr_header
*)bucket
->bu_bhs
[0]->b_data
;
6538 /* Add the credits for this bucket first. */
6539 metas
->credits
+= bucket
->bu_blocks
;
6540 return ocfs2_value_metas_in_xattr_header(inode
->i_sb
, bucket
->bu_bhs
[0],
6541 xh
, &metas
->num_metas
,
6542 &metas
->credits
, &metas
->num_recs
,
6543 ocfs2_value_tree_metas_in_bucket
,
/*
 * Given an xattr extent rec starting at blkno and having len clusters,
 * iterate all the buckets, calculate how much metadata we need for reflinking
 * all the ocfs2_xattr_value_roots, and lock the allocators accordingly.
 */
6552 static int ocfs2_lock_reflink_xattr_rec_allocators(
6553 struct ocfs2_reflink_xattr_tree_args
*args
,
6554 struct ocfs2_extent_tree
*xt_et
,
6555 u64 blkno
, u32 len
, int *credits
,
6556 struct ocfs2_alloc_context
**meta_ac
,
6557 struct ocfs2_alloc_context
**data_ac
)
6559 int ret
, num_free_extents
;
6560 struct ocfs2_value_tree_metas metas
;
6561 struct ocfs2_super
*osb
= OCFS2_SB(args
->reflink
->old_inode
->i_sb
);
6562 struct ocfs2_refcount_block
*rb
;
6564 memset(&metas
, 0, sizeof(metas
));
6566 ret
= ocfs2_iterate_xattr_buckets(args
->reflink
->old_inode
, blkno
, len
,
6567 ocfs2_calc_value_tree_metas
, &metas
);
6573 *credits
= metas
.credits
;
	/*
	 * Calculate what we need for the refcount tree change.
	 *
	 * We need to add/modify num_recs in the refcount tree, so just calculate
	 * an approximate number we need for the refcount tree change.
	 * Sometimes we need to split the tree, and after a split, half of the recs
	 * will be moved to the new block, and a new block can only provide
	 * half the number of recs. So we multiply the new blocks by 2.
	 * In the end, we have to add credits for modifying the already
	 * existing refcount block.
	 */
6586 rb
= (struct ocfs2_refcount_block
*)args
->reflink
->ref_root_bh
->b_data
;
6588 (metas
.num_recs
+ ocfs2_refcount_recs_per_rb(osb
->sb
) - 1) /
6589 ocfs2_refcount_recs_per_rb(osb
->sb
) * 2;
6590 metas
.num_metas
+= metas
.num_recs
;
6591 *credits
+= metas
.num_recs
+
6592 metas
.num_recs
* OCFS2_EXPAND_REFCOUNT_TREE_CREDITS
;
6593 if (le32_to_cpu(rb
->rf_flags
) & OCFS2_REFCOUNT_TREE_FL
)
6594 *credits
+= le16_to_cpu(rb
->rf_list
.l_tree_depth
) *
6595 le16_to_cpu(rb
->rf_list
.l_next_free_rec
) + 1;
6599 /* count in the xattr tree change. */
6600 num_free_extents
= ocfs2_num_free_extents(osb
, xt_et
);
6601 if (num_free_extents
< 0) {
6602 ret
= num_free_extents
;
6607 if (num_free_extents
< len
)
6608 metas
.num_metas
+= ocfs2_extend_meta_needed(xt_et
->et_root_el
);
6610 *credits
+= ocfs2_calc_extend_credits(osb
->sb
,
6611 xt_et
->et_root_el
, len
);
6613 if (metas
.num_metas
) {
6614 ret
= ocfs2_reserve_new_metadata_blocks(osb
, metas
.num_metas
,
6623 ret
= ocfs2_reserve_clusters(osb
, len
, data_ac
);
6630 ocfs2_free_alloc_context(*meta_ac
);
6638 static int ocfs2_reflink_xattr_buckets(handle_t
*handle
,
6639 u64 blkno
, u64 new_blkno
, u32 clusters
,
6640 struct ocfs2_alloc_context
*meta_ac
,
6641 struct ocfs2_alloc_context
*data_ac
,
6642 struct ocfs2_reflink_xattr_tree_args
*args
)
6645 struct super_block
*sb
= args
->reflink
->old_inode
->i_sb
;
6646 u32 bpc
= ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb
));
6647 u32 num_buckets
= clusters
* bpc
;
6648 int bpb
= args
->old_bucket
->bu_blocks
;
6649 struct ocfs2_xattr_value_buf vb
= {
6650 .vb_access
= ocfs2_journal_access
,
6653 for (i
= 0; i
< num_buckets
; i
++, blkno
+= bpb
, new_blkno
+= bpb
) {
6654 ret
= ocfs2_read_xattr_bucket(args
->old_bucket
, blkno
);
6660 ret
= ocfs2_init_xattr_bucket(args
->new_bucket
, new_blkno
);
		/*
		 * The real bucket num in this series of blocks is stored
		 * in the 1st bucket.
		 */
6671 num_buckets
= le16_to_cpu(
6672 bucket_xh(args
->old_bucket
)->xh_num_buckets
);
6674 ret
= ocfs2_xattr_bucket_journal_access(handle
,
6676 OCFS2_JOURNAL_ACCESS_CREATE
);
6682 for (j
= 0; j
< bpb
; j
++)
6683 memcpy(bucket_block(args
->new_bucket
, j
),
6684 bucket_block(args
->old_bucket
, j
),
6687 ocfs2_xattr_bucket_journal_dirty(handle
, args
->new_bucket
);
6689 ret
= ocfs2_reflink_xattr_header(handle
, args
->reflink
,
6690 args
->old_bucket
->bu_bhs
[0],
6691 bucket_xh(args
->old_bucket
),
6692 args
->new_bucket
->bu_bhs
[0],
6693 bucket_xh(args
->new_bucket
),
6695 ocfs2_get_reflink_xattr_value_root
,
		/*
		 * Re-access and dirty the bucket to calculate metaecc.
		 * We may have extended the transaction in reflink_xattr_header,
		 * which can drop the journal access we already had on the block.
		 */
6707 ret
= ocfs2_xattr_bucket_journal_access(handle
,
6709 OCFS2_JOURNAL_ACCESS_WRITE
);
6715 ocfs2_xattr_bucket_journal_dirty(handle
, args
->new_bucket
);
6716 ocfs2_xattr_bucket_relse(args
->old_bucket
);
6717 ocfs2_xattr_bucket_relse(args
->new_bucket
);
6720 ocfs2_xattr_bucket_relse(args
->old_bucket
);
6721 ocfs2_xattr_bucket_relse(args
->new_bucket
);
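
/*
 * Sizing note for the copy loop above: an xattr bucket is a fixed
 * OCFS2_XATTR_BUCKET_SIZE (4KB) region, so a cluster contributes
 * clustersize / 4KB buckets; e.g. a 64KB cluster is copied as 16 buckets,
 * each made of bpb = bucket-size / blocksize buffer heads. The 1st bucket
 * of the series also carries xh_num_buckets, which caps how many of them
 * are actually in use.
 */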
/*
 * Create the same xattr extent record in the new inode's xattr tree.
 */
6727 static int ocfs2_reflink_xattr_rec(struct inode
*inode
,
6728 struct buffer_head
*root_bh
,
6734 int ret
, credits
= 0;
6735 u32 p_cluster
, num_clusters
;
6738 struct ocfs2_reflink_xattr_tree_args
*args
=
6739 (struct ocfs2_reflink_xattr_tree_args
*)para
;
6740 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
6741 struct ocfs2_alloc_context
*meta_ac
= NULL
;
6742 struct ocfs2_alloc_context
*data_ac
= NULL
;
6743 struct ocfs2_extent_tree et
;
6745 ocfs2_init_xattr_tree_extent_tree(&et
,
6746 INODE_CACHE(args
->reflink
->new_inode
),
6749 ret
= ocfs2_lock_reflink_xattr_rec_allocators(args
, &et
, blkno
,
6751 &meta_ac
, &data_ac
);
6757 handle
= ocfs2_start_trans(osb
, credits
);
6758 if (IS_ERR(handle
)) {
6759 ret
= PTR_ERR(handle
);
6764 ret
= ocfs2_claim_clusters(osb
, handle
, data_ac
,
6765 len
, &p_cluster
, &num_clusters
);
6771 new_blkno
= ocfs2_clusters_to_blocks(osb
->sb
, p_cluster
);
6773 mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
6774 (unsigned long long)blkno
, (unsigned long long)new_blkno
, len
);
6775 ret
= ocfs2_reflink_xattr_buckets(handle
, blkno
, new_blkno
, len
,
6776 meta_ac
, data_ac
, args
);
6782 mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
6783 (unsigned long long)new_blkno
, len
, cpos
);
6784 ret
= ocfs2_insert_extent(handle
, &et
, cpos
, new_blkno
,
6790 ocfs2_commit_trans(osb
, handle
);
6794 ocfs2_free_alloc_context(meta_ac
);
6796 ocfs2_free_alloc_context(data_ac
);
/*
 * Create reflinked xattr buckets.
 * We add the buckets one by one, and refcount all the xattrs in each bucket
 * if they are stored outside.
 */
6805 static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink
*args
,
6806 struct buffer_head
*blk_bh
,
6807 struct buffer_head
*new_blk_bh
)
6810 struct ocfs2_reflink_xattr_tree_args para
;
6812 memset(¶
, 0, sizeof(para
));
6813 para
.reflink
= args
;
6814 para
.old_blk_bh
= blk_bh
;
6815 para
.new_blk_bh
= new_blk_bh
;
6817 para
.old_bucket
= ocfs2_xattr_bucket_new(args
->old_inode
);
6818 if (!para
.old_bucket
) {
6819 mlog_errno(-ENOMEM
);
6823 para
.new_bucket
= ocfs2_xattr_bucket_new(args
->new_inode
);
6824 if (!para
.new_bucket
) {
6830 ret
= ocfs2_iterate_xattr_index_block(args
->old_inode
, blk_bh
,
6831 ocfs2_reflink_xattr_rec
,
6837 ocfs2_xattr_bucket_free(para
.old_bucket
);
6838 ocfs2_xattr_bucket_free(para
.new_bucket
);
6842 static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink
*args
,
6843 struct buffer_head
*blk_bh
)
6845 int ret
, indexed
= 0;
6846 struct buffer_head
*new_blk_bh
= NULL
;
6847 struct ocfs2_xattr_block
*xb
=
6848 (struct ocfs2_xattr_block
*)blk_bh
->b_data
;
6851 if (le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
)
6854 ret
= ocfs2_create_empty_xattr_block(args
->new_inode
, args
->new_bh
,
6855 &new_blk_bh
, indexed
);
6861 if (!(le16_to_cpu(xb
->xb_flags
) & OCFS2_XATTR_INDEXED
))
6862 ret
= ocfs2_reflink_xattr_block(args
, blk_bh
, new_blk_bh
);
6864 ret
= ocfs2_reflink_xattr_tree(args
, blk_bh
, new_blk_bh
);
6873 static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry
*xe
)
6875 int type
= ocfs2_xattr_get_type(xe
);
6877 return type
!= OCFS2_XATTR_INDEX_SECURITY
&&
6878 type
!= OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS
&&
6879 type
!= OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT
;
6882 int ocfs2_reflink_xattrs(struct inode
*old_inode
,
6883 struct buffer_head
*old_bh
,
6884 struct inode
*new_inode
,
6885 struct buffer_head
*new_bh
,
6886 bool preserve_security
)
6889 struct ocfs2_xattr_reflink args
;
6890 struct ocfs2_inode_info
*oi
= OCFS2_I(old_inode
);
6891 struct ocfs2_dinode
*di
= (struct ocfs2_dinode
*)old_bh
->b_data
;
6892 struct buffer_head
*blk_bh
= NULL
;
6893 struct ocfs2_cached_dealloc_ctxt dealloc
;
6894 struct ocfs2_refcount_tree
*ref_tree
;
6895 struct buffer_head
*ref_root_bh
= NULL
;
6897 ret
= ocfs2_lock_refcount_tree(OCFS2_SB(old_inode
->i_sb
),
6898 le64_to_cpu(di
->i_refcount_loc
),
6899 1, &ref_tree
, &ref_root_bh
);
6905 ocfs2_init_dealloc_ctxt(&dealloc
);
6907 args
.old_inode
= old_inode
;
6908 args
.new_inode
= new_inode
;
6909 args
.old_bh
= old_bh
;
6910 args
.new_bh
= new_bh
;
6911 args
.ref_ci
= &ref_tree
->rf_ci
;
6912 args
.ref_root_bh
= ref_root_bh
;
6913 args
.dealloc
= &dealloc
;
6914 if (preserve_security
)
6915 args
.xattr_reflinked
= NULL
;
6917 args
.xattr_reflinked
= ocfs2_reflink_xattr_no_security
;
6919 if (oi
->ip_dyn_features
& OCFS2_INLINE_XATTR_FL
) {
6920 ret
= ocfs2_reflink_xattr_inline(&args
);
6927 if (!di
->i_xattr_loc
)
6930 ret
= ocfs2_read_xattr_block(old_inode
, le64_to_cpu(di
->i_xattr_loc
),
6937 ret
= ocfs2_reflink_xattr_in_block(&args
, blk_bh
);
6944 ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode
->i_sb
),
6946 brelse(ref_root_bh
);
6948 if (ocfs2_dealloc_has_cluster(&dealloc
)) {
6949 ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode
->i_sb
), 1);
6950 ocfs2_run_deallocs(OCFS2_SB(old_inode
->i_sb
), &dealloc
);
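
/*
 * Order of operations in the function above: take the refcount tree lock
 * once for the whole reflink, copy the inline xattrs (if any), then the
 * xattr block or indexed xattr tree, and only afterwards unlock and run the
 * queued deallocations. When preserve_security is false, the
 * xattr_reflinked callback filters out security and POSIX ACL entries so
 * that they can be re-initialized for the new inode (see
 * ocfs2_init_security_and_acl below) instead of being copied.
 */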
/*
 * Initialize the security and acl for an already created inode.
 * Used when reflinking a non-preserve-security file.
 *
 * It uses common APIs like ocfs2_xattr_set, so the caller
 * must not hold any lock except i_mutex.
 */
6964 int ocfs2_init_security_and_acl(struct inode
*dir
,
6965 struct inode
*inode
)
6968 struct buffer_head
*dir_bh
= NULL
;
6969 struct ocfs2_security_xattr_info si
= {
6973 ret
= ocfs2_init_security_get(inode
, dir
, &si
);
6975 ret
= ocfs2_xattr_set(inode
, OCFS2_XATTR_INDEX_SECURITY
,
6976 si
.name
, si
.value
, si
.value_len
,
6982 } else if (ret
!= -EOPNOTSUPP
) {
6987 ret
= ocfs2_inode_lock(dir
, &dir_bh
, 0);
6993 ret
= ocfs2_init_acl(NULL
, inode
, dir
, NULL
, dir_bh
, NULL
, NULL
);
6997 ocfs2_inode_unlock(dir
, 0);

/*
 * 'security' attributes support
 */
static size_t ocfs2_xattr_security_list(struct dentry *dentry, char *list,
					size_t list_size, const char *name,
					size_t name_len, int type)
{
	const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
	const size_t total_len = prefix_len + name_len + 1;

	if (list && total_len <= list_size) {
		memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
		memcpy(list + prefix_len, name, name_len);
		list[prefix_len + name_len] = '\0';
	}
	return total_len;
}

static int ocfs2_xattr_security_get(struct dentry *dentry, const char *name,
				    void *buffer, size_t size, int type)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
			       name, buffer, size);
}

static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
				    const void *value, size_t size, int flags,
				    int type)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;

	return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
			       name, value, size, flags);
}

int ocfs2_init_security_get(struct inode *inode,
			    struct inode *dir,
			    struct ocfs2_security_xattr_info *si)
{
	/* check whether ocfs2 supports the xattr feature */
	if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
		return -EOPNOTSUPP;

	return security_inode_init_security(inode, dir, &si->name, &si->value,
					    &si->value_len);
}

int ocfs2_init_security_set(handle_t *handle,
			    struct inode *inode,
			    struct buffer_head *di_bh,
			    struct ocfs2_security_xattr_info *si,
			    struct ocfs2_alloc_context *xattr_ac,
			    struct ocfs2_alloc_context *data_ac)
{
	return ocfs2_xattr_set_handle(handle, inode, di_bh,
				      OCFS2_XATTR_INDEX_SECURITY,
				      si->name, si->value, si->value_len, 0,
				      xattr_ac, data_ac);
}

struct xattr_handler ocfs2_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= ocfs2_xattr_security_list,
	.get	= ocfs2_xattr_security_get,
	.set	= ocfs2_xattr_security_set,
};
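
/*
 * Handler note (applies to the 'trusted' and 'user' handlers below as
 * well): the VFS resolves the "security."/"trusted."/"user." prefix and
 * hands these callbacks only the remaining suffix, which is why an empty
 * name is rejected with -EINVAL and why ->list() re-adds the prefix when
 * building the listxattr buffer.
 */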

/*
 * 'trusted' attributes support
 */
static size_t ocfs2_xattr_trusted_list(struct dentry *dentry, char *list,
				       size_t list_size, const char *name,
				       size_t name_len, int type)
{
	const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
	const size_t total_len = prefix_len + name_len + 1;

	if (list && total_len <= list_size) {
		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
		memcpy(list + prefix_len, name, name_len);
		list[prefix_len + name_len] = '\0';
	}
	return total_len;
}

static int ocfs2_xattr_trusted_get(struct dentry *dentry, const char *name,
				   void *buffer, size_t size, int type)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
			       name, buffer, size);
}

static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name,
				   const void *value, size_t size, int flags,
				   int type)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;

	return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
			       name, value, size, flags);
}

struct xattr_handler ocfs2_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.list	= ocfs2_xattr_trusted_list,
	.get	= ocfs2_xattr_trusted_get,
	.set	= ocfs2_xattr_trusted_set,
};

/*
 * 'user' attributes support
 */
static size_t ocfs2_xattr_user_list(struct dentry *dentry, char *list,
				    size_t list_size, const char *name,
				    size_t name_len, int type)
{
	const size_t prefix_len = XATTR_USER_PREFIX_LEN;
	const size_t total_len = prefix_len + name_len + 1;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
		return 0;

	if (list && total_len <= list_size) {
		memcpy(list, XATTR_USER_PREFIX, prefix_len);
		memcpy(list + prefix_len, name, name_len);
		list[prefix_len + name_len] = '\0';
	}
	return total_len;
}

static int ocfs2_xattr_user_get(struct dentry *dentry, const char *name,
				void *buffer, size_t size, int type)
{
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (strcmp(name, "") == 0)
		return -EINVAL;
	if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
		return -EOPNOTSUPP;
	return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_USER, name,
			       buffer, size);
}

static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name,
				const void *value, size_t size, int flags,
				int type)
{
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (strcmp(name, "") == 0)
		return -EINVAL;
	if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
		return -EOPNOTSUPP;

	return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_USER,
			       name, value, size, flags);
}

struct xattr_handler ocfs2_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.list	= ocfs2_xattr_user_list,
	.get	= ocfs2_xattr_user_get,
	.set	= ocfs2_xattr_user_set,
};