/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * xattr.c
 *
 * Copyright (C) 2004, 2008 Oracle.  All rights reserved.
 *
 * CREDITS:
 * Lots of code in this file is copied from linux/fs/ext3/xattr.c.
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/security.h>

#define MLOG_MASK_PREFIX ML_XATTR
#include <cluster/masklog.h>

#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "file.h"
#include "symlink.h"
#include "sysfile.h"
#include "inode.h"
#include "journal.h"
#include "ocfs2_fs.h"
#include "suballoc.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "super.h"
#include "xattr.h"
#include "refcounttree.h"
#include "acl.h"

struct ocfs2_xattr_def_value_root {
	struct ocfs2_xattr_value_root	xv;
	struct ocfs2_extent_rec		er;
};

struct ocfs2_xattr_bucket {
	/* The inode these xattrs are associated with */
	struct inode *bu_inode;

	/* The actual buffers that make up the bucket */
	struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];

	/* How many blocks make up one bucket for this filesystem */
	int bu_blocks;
};

struct ocfs2_xattr_set_ctxt {
	handle_t *handle;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
};

#define OCFS2_XATTR_ROOT_SIZE	(sizeof(struct ocfs2_xattr_def_value_root))
#define OCFS2_XATTR_INLINE_SIZE	80
#define OCFS2_XATTR_HEADER_GAP	4
#define OCFS2_XATTR_FREE_IN_IBODY	(OCFS2_MIN_XATTR_INLINE_SIZE \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)
#define OCFS2_XATTR_FREE_IN_BLOCK(ptr)	((ptr)->i_sb->s_blocksize \
					 - sizeof(struct ocfs2_xattr_block) \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)

static struct ocfs2_xattr_def_value_root def_xv = {
	.xv.xr_list.l_count = cpu_to_le16(1),
};

struct xattr_handler *ocfs2_xattr_handlers[] = {
	&ocfs2_xattr_user_handler,
	&ocfs2_xattr_acl_access_handler,
	&ocfs2_xattr_acl_default_handler,
	&ocfs2_xattr_trusted_handler,
	&ocfs2_xattr_security_handler,
	NULL
};

static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
	[OCFS2_XATTR_INDEX_USER]	= &ocfs2_xattr_user_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]
					= &ocfs2_xattr_acl_access_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT]
					= &ocfs2_xattr_acl_default_handler,
	[OCFS2_XATTR_INDEX_TRUSTED]	= &ocfs2_xattr_trusted_handler,
	[OCFS2_XATTR_INDEX_SECURITY]	= &ocfs2_xattr_security_handler,
};

struct ocfs2_xattr_info {
	int name_index;
	const char *name;
	const void *value;
	size_t value_len;
};

struct ocfs2_xattr_search {
	struct buffer_head *inode_bh;
	/*
	 * xattr_bh points to the block buffer head which holds the extended
	 * attributes; when the extended attributes live in the inode,
	 * xattr_bh is equal to inode_bh.
	 */
	struct buffer_head *xattr_bh;
	struct ocfs2_xattr_header *header;
	struct ocfs2_xattr_bucket *bucket;
	void *base;
	void *end;
	struct ocfs2_xattr_entry *here;
	int not_found;
};

static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
					     struct ocfs2_xattr_header *xh,
					     int index,
					     int *block_off,
					     int *new_offset);

static int ocfs2_xattr_block_find(struct inode *inode,
				  int name_index,
				  const char *name,
				  struct ocfs2_xattr_search *xs);
static int ocfs2_xattr_index_block_find(struct inode *inode,
					struct buffer_head *root_bh,
					int name_index,
					const char *name,
					struct ocfs2_xattr_search *xs);

static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
					     struct buffer_head *blk_bh,
					     char *buffer,
					     size_t buffer_size);

static int ocfs2_xattr_create_index_block(struct inode *inode,
					  struct ocfs2_xattr_search *xs,
					  struct ocfs2_xattr_set_ctxt *ctxt);

static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
					     struct ocfs2_xattr_info *xi,
					     struct ocfs2_xattr_search *xs,
					     struct ocfs2_xattr_set_ctxt *ctxt);

typedef int (xattr_tree_rec_func)(struct inode *inode,
				  struct buffer_head *root_bh,
				  u64 blkno, u32 cpos, u32 len, void *para);
static int ocfs2_iterate_xattr_index_block(struct inode *inode,
					   struct buffer_head *root_bh,
					   xattr_tree_rec_func *rec_func,
					   void *para);
static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
					struct ocfs2_xattr_bucket *bucket,
					void *para);
static int ocfs2_rm_xattr_cluster(struct inode *inode,
				  struct buffer_head *root_bh,
				  u64 blkno,
				  u32 cpos,
				  u32 len,
				  void *para);

static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
				  u64 src_blk, u64 last_blk, u64 to_blk,
				  unsigned int start_bucket,
				  u32 *first_hash);
static int ocfs2_prepare_refcount_xattr(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_xattr_info *xi,
					struct ocfs2_xattr_search *xis,
					struct ocfs2_xattr_search *xbs,
					struct ocfs2_refcount_tree **ref_tree,
					int *meta_need,
					int *credits);
static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
					   struct ocfs2_xattr_bucket *bucket,
					   int offset,
					   struct ocfs2_xattr_value_root **xv,
					   struct buffer_head **bh);

static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
{
	return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE;
}

static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
{
	return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
}

static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb)
{
	u16 len = sb->s_blocksize -
		  offsetof(struct ocfs2_xattr_header, xh_entries);

	return len / sizeof(struct ocfs2_xattr_entry);
}

#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))

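/*
 * Bucket management helpers.  A bucket is a fixed-size group of blocks
 * (OCFS2_XATTR_BUCKET_SIZE bytes); ocfs2_xattr_bucket_new() only allocates
 * the in-memory structure, the buffer_heads are attached later by
 * ocfs2_init_xattr_bucket() or ocfs2_read_xattr_bucket().
 */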
static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode)
{
	struct ocfs2_xattr_bucket *bucket;
	int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);

	BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET);

	bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
	if (bucket) {
		bucket->bu_inode = inode;
		bucket->bu_blocks = blks;
	}

	return bucket;
}

static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)
{
	int i;

	for (i = 0; i < bucket->bu_blocks; i++) {
		brelse(bucket->bu_bhs[i]);
		bucket->bu_bhs[i] = NULL;
	}
}

static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket)
{
	if (bucket) {
		ocfs2_xattr_bucket_relse(bucket);
		bucket->bu_inode = NULL;
		kfree(bucket);
	}
}

/*
 * A bucket that has never been written to disk doesn't need to be
 * read.  We just need the buffer_heads.  Don't call this for
 * buckets that are already on disk.  ocfs2_read_xattr_bucket() initializes
 * them fully.
 */
static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				   u64 xb_blkno)
{
	int i, rc = 0;

	for (i = 0; i < bucket->bu_blocks; i++) {
		bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
					      xb_blkno + i);
		if (!bucket->bu_bhs[i]) {
			rc = -EIO;
			mlog_errno(rc);
			break;
		}

		if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
					   bucket->bu_bhs[i]))
			ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
						      bucket->bu_bhs[i]);
	}

	if (rc)
		ocfs2_xattr_bucket_relse(bucket);
	return rc;
}

/* Read the xattr bucket at xb_blkno */
static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				   u64 xb_blkno)
{
	int rc;

	rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
			       bucket->bu_blocks, bucket->bu_bhs, 0,
			       NULL);
	if (!rc) {
		spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
		rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
						 bucket->bu_bhs,
						 bucket->bu_blocks,
						 &bucket_xh(bucket)->xh_check);
		spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
		if (rc)
			mlog_errno(rc);
	}

	if (rc)
		ocfs2_xattr_bucket_relse(bucket);
	return rc;
}

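/*
 * Journal helpers that operate on every block of a bucket: get journal
 * access on all blocks before modification, then recompute the bucket's
 * metaecc and mark every block dirty in the handle.
 */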
static int ocfs2_xattr_bucket_journal_access(handle_t *handle,
					     struct ocfs2_xattr_bucket *bucket,
					     int type)
{
	int i, rc = 0;

	for (i = 0; i < bucket->bu_blocks; i++) {
		rc = ocfs2_journal_access(handle,
					  INODE_CACHE(bucket->bu_inode),
					  bucket->bu_bhs[i], type);
		if (rc) {
			mlog_errno(rc);
			break;
		}
	}

	return rc;
}

static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle,
					     struct ocfs2_xattr_bucket *bucket)
{
	int i;

	spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
	ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
				   bucket->bu_bhs, bucket->bu_blocks,
				   &bucket_xh(bucket)->xh_check);
	spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);

	for (i = 0; i < bucket->bu_blocks; i++)
		ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
}

static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest,
					 struct ocfs2_xattr_bucket *src)
{
	int i;
	int blocksize = src->bu_inode->i_sb->s_blocksize;

	BUG_ON(dest->bu_blocks != src->bu_blocks);
	BUG_ON(dest->bu_inode != src->bu_inode);

	for (i = 0; i < src->bu_blocks; i++) {
		memcpy(bucket_block(dest, i), bucket_block(src, i),
		       blocksize);
	}
}

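/*
 * Sanity-check an xattr block as it comes off disk: verify the metaecc
 * first, then the signature, the recorded block number and the
 * filesystem generation.
 */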
static int ocfs2_validate_xattr_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	int rc;
	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)bh->b_data;

	mlog(0, "Validating xattr block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check);
	if (rc)
		return rc;

	/*
	 * Errors after here are fatal
	 */

	if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
		ocfs2_error(sb,
			    "Extended attribute block #%llu has bad "
			    "signature %.*s",
			    (unsigned long long)bh->b_blocknr, 7,
			    xb->xb_signature);
		return -EINVAL;
	}

	if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
		ocfs2_error(sb,
			    "Extended attribute block #%llu has an "
			    "invalid xb_blkno of %llu",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(xb->xb_blkno));
		return -EINVAL;
	}

	if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
			    "Extended attribute block #%llu has an invalid "
			    "xb_fs_generation of #%u",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(xb->xb_fs_generation));
		return -EINVAL;
	}

	return 0;
}

static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
				  struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp,
			      ocfs2_validate_xattr_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}

static inline const char *ocfs2_xattr_prefix(int name_index)
{
	struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < OCFS2_XATTR_MAX)
		handler = ocfs2_xattr_handler_map[name_index];

	return handler ? handler->prefix : NULL;
}

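/*
 * Hash an xattr name, seeding with the superblock uuid hash and folding
 * in one name byte per iteration with a rotate-and-xor step.
 */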
static u32 ocfs2_xattr_name_hash(struct inode *inode,
				 const char *name,
				 int name_len)
{
	/* Get hash value of uuid from super block */
	u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash;
	int i;

	/* hash extended attribute name */
	for (i = 0; i < name_len; i++) {
		hash = (hash << OCFS2_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^
		       *name++;
	}

	return hash;
}

/*
 * ocfs2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static void ocfs2_xattr_hash_entry(struct inode *inode,
				   struct ocfs2_xattr_header *header,
				   struct ocfs2_xattr_entry *entry)
{
	u32 hash = 0;
	char *name = (char *)header + le16_to_cpu(entry->xe_name_offset);

	hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len);
	entry->xe_name_hash = cpu_to_le32(hash);

	return;
}

static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
{
	int size = 0;

	if (value_len <= OCFS2_XATTR_INLINE_SIZE)
		size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
	else
		size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
	size += sizeof(struct ocfs2_xattr_entry);

	return size;
}

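/*
 * Estimate the metadata blocks, clusters and journal credits needed to
 * attach a security xattr to a newly created inode.
 */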
int ocfs2_calc_security_init(struct inode *dir,
			     struct ocfs2_security_xattr_info *si,
			     int *want_clusters,
			     int *xattr_credits,
			     struct ocfs2_alloc_context **xattr_ac)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	int s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
						 si->value_len);

	/*
	 * The max inline space taken by a security xattr is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
	 * so reserving one metadata block for it is enough.
	 */
	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
	    s_size > OCFS2_XATTR_FREE_IN_IBODY) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}
		*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
	}

	/* Reserve clusters for an xattr value which will be set in the B tree. */
	if (si->value_len > OCFS2_XATTR_INLINE_SIZE) {
		int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
							    si->value_len);

		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
							   new_clusters);
		*want_clusters += new_clusters;
	}
	return ret;
}

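/*
 * Like ocfs2_calc_security_init(), but also accounts for the default and
 * access ACLs a new inode may inherit from its parent directory.
 */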
int ocfs2_calc_xattr_init(struct inode *dir,
			  struct buffer_head *dir_bh,
			  int mode,
			  struct ocfs2_security_xattr_info *si,
			  int *want_clusters,
			  int *xattr_credits,
			  int *want_meta)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	int s_size = 0, a_size = 0, acl_len = 0, new_clusters;

	if (si->enable)
		s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
						     si->value_len);

	if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
		acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
					OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
					"", NULL, 0);
		if (acl_len > 0) {
			a_size = ocfs2_xattr_entry_real_size(0, acl_len);
			if (S_ISDIR(mode))
				a_size <<= 1;
		} else if (acl_len != 0 && acl_len != -ENODATA) {
			/* Propagate the ACL lookup error instead of 0. */
			ret = acl_len;
			mlog_errno(ret);
			return ret;
		}
	}

	if (!(s_size + a_size))
		return ret;

	/*
	 * The max inline space taken by a security xattr is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes, and by an acl
	 * xattr it is 80(value) + 16(entry) * 2(if directory) = 192 bytes.
	 * When blocksize = 512 we may need to reserve one more cluster
	 * for an xattr bucket; otherwise reserving one metadata block
	 * for them is enough.
	 * If this is a new directory with inline data,
	 * we choose to reserve the entire inline area for
	 * directory contents and force an external xattr block.
	 */
	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
	    (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) ||
	    (s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) {
		*want_meta = *want_meta + 1;
		*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
	}

	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE &&
	    (s_size + a_size) > OCFS2_XATTR_FREE_IN_BLOCK(dir)) {
		*want_clusters += 1;
		*xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb);
	}

	/*
	 * Reserve credits and clusters for xattrs whose large values
	 * have to be stored outside.
	 */
	if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) {
		new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
							si->value_len);
		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
							   new_clusters);
		*want_clusters += new_clusters;
	}
	if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL &&
	    acl_len > OCFS2_XATTR_INLINE_SIZE) {
		/* a directory has both DEFAULT and ACCESS ACLs */
		new_clusters = (S_ISDIR(mode) ? 2 : 1) *
				ocfs2_clusters_for_bytes(dir->i_sb, acl_len);
		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
							   new_clusters);
		*want_clusters += new_clusters;
	}

	return ret;
}

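/*
 * Grow the allocation backing an externally stored xattr value by
 * clusters_to_add clusters, using the allocators prepared in ctxt.
 */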
static int ocfs2_xattr_extend_allocation(struct inode *inode,
					 u32 clusters_to_add,
					 struct ocfs2_xattr_value_buf *vb,
					 struct ocfs2_xattr_set_ctxt *ctxt)
{
	int status = 0;
	handle_t *handle = ctxt->handle;
	enum ocfs2_alloc_restarted why;
	u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
	struct ocfs2_extent_tree et;

	mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add);

	ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

	status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
			       OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
	status = ocfs2_add_clusters_in_btree(handle,
					     &et,
					     &logical_start,
					     clusters_to_add,
					     0,
					     ctxt->data_ac,
					     ctxt->meta_ac,
					     &why);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	status = ocfs2_journal_dirty(handle, vb->vb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;

	/*
	 * We should have already allocated enough space before the transaction,
	 * so no need to restart.
	 */
	BUG_ON(why != RESTART_NONE || clusters_to_add);

leave:

	return status;
}

static int __ocfs2_remove_xattr_range(struct inode *inode,
				      struct ocfs2_xattr_value_buf *vb,
				      u32 cpos, u32 phys_cpos, u32 len,
				      unsigned int ext_flags,
				      struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
	handle_t *handle = ctxt->handle;
	struct ocfs2_extent_tree et;

	ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

	ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
			    OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
				  &ctxt->dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	le32_add_cpu(&vb->vb_xv->xr_clusters, -len);

	ret = ocfs2_journal_dirty(handle, vb->vb_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (ext_flags & OCFS2_EXT_REFCOUNTED)
		ret = ocfs2_decrease_refcount(inode, handle,
					ocfs2_blocks_to_clusters(inode->i_sb,
								 phys_blkno),
					len, ctxt->meta_ac, &ctxt->dealloc, 1);
	else
		ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc,
						  phys_blkno, len);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}

static int ocfs2_xattr_shrink_size(struct inode *inode,
				   u32 old_clusters,
				   u32 new_clusters,
				   struct ocfs2_xattr_value_buf *vb,
				   struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret = 0;
	unsigned int ext_flags;
	u32 trunc_len, cpos, phys_cpos, alloc_size;
	u64 block;

	if (old_clusters <= new_clusters)
		return 0;

	cpos = new_clusters;
	trunc_len = old_clusters - new_clusters;
	while (trunc_len) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos,
					       &alloc_size,
					       &vb->vb_xv->xr_list, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (alloc_size > trunc_len)
			alloc_size = trunc_len;

		ret = __ocfs2_remove_xattr_range(inode, vb, cpos,
						 phys_cpos, alloc_size,
						 ext_flags, ctxt);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
		ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode),
						       block, alloc_size);
		cpos += alloc_size;
		trunc_len -= alloc_size;
	}

out:
	return ret;
}

static int ocfs2_xattr_value_truncate(struct inode *inode,
				      struct ocfs2_xattr_value_buf *vb,
				      int len,
				      struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len);
	u32 old_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);

	if (new_clusters == old_clusters)
		return 0;

	if (new_clusters > old_clusters)
		ret = ocfs2_xattr_extend_allocation(inode,
						    new_clusters - old_clusters,
						    vb, ctxt);
	else
		ret = ocfs2_xattr_shrink_size(inode,
					      old_clusters, new_clusters,
					      vb, ctxt);

	return ret;
}

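/*
 * Append one prefixed, NUL-terminated xattr name to the listxattr
 * buffer.  When size is zero we only accumulate the length the caller
 * would need.
 */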
static int ocfs2_xattr_list_entry(char *buffer, size_t size,
				  size_t *result, const char *prefix,
				  const char *name, int name_len)
{
	char *p = buffer + *result;
	int prefix_len = strlen(prefix);
	int total_len = prefix_len + name_len + 1;

	*result += total_len;

	/* we are just looking for how big our buffer needs to be */
	if (!size)
		return 0;

	if (*result > size)
		return -ERANGE;

	memcpy(p, prefix, prefix_len);
	memcpy(p + prefix_len, name, name_len);
	p[prefix_len + name_len] = '\0';

	return 0;
}

static int ocfs2_xattr_list_entries(struct inode *inode,
				    struct ocfs2_xattr_header *header,
				    char *buffer, size_t buffer_size)
{
	size_t result = 0;
	int i, type, ret;
	const char *prefix, *name;

	for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) {
		struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
		type = ocfs2_xattr_get_type(entry);
		prefix = ocfs2_xattr_prefix(type);

		if (prefix) {
			name = (const char *)header +
				le16_to_cpu(entry->xe_name_offset);

			ret = ocfs2_xattr_list_entry(buffer, buffer_size,
						     &result, prefix, name,
						     entry->xe_name_len);
			if (ret)
				return ret;
		}
	}

	return result;
}

int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
					 struct ocfs2_dinode *di)
{
	struct ocfs2_xattr_header *xh;
	int i;

	xh = (struct ocfs2_xattr_header *)
		 ((void *)di + inode->i_sb->s_blocksize -
		 le16_to_cpu(di->i_xattr_inline_size));

	for (i = 0; i < le16_to_cpu(xh->xh_count); i++)
		if (!ocfs2_xattr_is_local(&xh->xh_entries[i]))
			return 1;

	return 0;
}

static int ocfs2_xattr_ibody_list(struct inode *inode,
				  struct ocfs2_dinode *di,
				  char *buffer,
				  size_t buffer_size)
{
	struct ocfs2_xattr_header *header = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	int ret = 0;

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
		return ret;

	header = (struct ocfs2_xattr_header *)
		 ((void *)di + inode->i_sb->s_blocksize -
		 le16_to_cpu(di->i_xattr_inline_size));

	ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size);

	return ret;
}

static int ocfs2_xattr_block_list(struct inode *inode,
				  struct ocfs2_dinode *di,
				  char *buffer,
				  size_t buffer_size)
{
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_xattr_block *xb;
	int ret = 0;

	if (!di->i_xattr_loc)
		return ret;

	ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
				     &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
	if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
		struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
		ret = ocfs2_xattr_list_entries(inode, header,
					       buffer, buffer_size);
	} else
		ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh,
							buffer, buffer_size);

	brelse(blk_bh);

	return ret;
}

ssize_t ocfs2_listxattr(struct dentry *dentry,
			char *buffer,
			size_t size)
{
	int ret = 0, i_ret = 0, b_ret = 0;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(dentry->d_inode);

	if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb)))
		return -EOPNOTSUPP;

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return ret;

	ret = ocfs2_inode_lock(dentry->d_inode, &di_bh, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_read(&oi->ip_xattr_sem);
	i_ret = ocfs2_xattr_ibody_list(dentry->d_inode, di, buffer, size);
	if (i_ret < 0)
		b_ret = 0;
	else {
		if (buffer) {
			buffer += i_ret;
			size -= i_ret;
		}
		b_ret = ocfs2_xattr_block_list(dentry->d_inode, di,
					       buffer, size);
		if (b_ret < 0)
			i_ret = 0;
	}
	up_read(&oi->ip_xattr_sem);
	ocfs2_inode_unlock(dentry->d_inode, 0);

	brelse(di_bh);

	return i_ret + b_ret;
}

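/*
 * Linear scan of the entries in xs->header for a (name_index, name)
 * match.  On success xs->here points at the matching entry; otherwise
 * -ENODATA is returned.
 */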
static int ocfs2_xattr_find_entry(int name_index,
				  const char *name,
				  struct ocfs2_xattr_search *xs)
{
	struct ocfs2_xattr_entry *entry;
	size_t name_len;
	int i, cmp = 1;

	if (name == NULL)
		return -EINVAL;

	name_len = strlen(name);
	entry = xs->here;
	for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
		cmp = name_index - ocfs2_xattr_get_type(entry);
		if (!cmp)
			cmp = name_len - entry->xe_name_len;
		if (!cmp)
			cmp = memcmp(name, (xs->base +
				     le16_to_cpu(entry->xe_name_offset)),
				     name_len);
		if (cmp == 0)
			break;
		entry += 1;
	}
	xs->here = entry;

	return cmp ? -ENODATA : 0;
}

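/*
 * Copy an externally stored value into the caller's buffer by walking
 * the value's extent list cluster by cluster and reading it block by
 * block.
 */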
static int ocfs2_xattr_get_value_outside(struct inode *inode,
					 struct ocfs2_xattr_value_root *xv,
					 void *buffer,
					 size_t len)
{
	u32 cpos, p_cluster, num_clusters, bpc, clusters;
	u64 blkno;
	int i, ret = 0;
	size_t cplen, blocksize;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_list *el;

	el = &xv->xr_list;
	clusters = le32_to_cpu(xv->xr_clusters);
	bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	blocksize = inode->i_sb->s_blocksize;

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, el, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		/* Copy ocfs2_xattr_value */
		for (i = 0; i < num_clusters * bpc; i++, blkno++) {
			ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
					       &bh, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			cplen = len >= blocksize ? blocksize : len;
			memcpy(buffer, bh->b_data, cplen);
			len -= cplen;
			buffer += cplen;

			brelse(bh);
			bh = NULL;
			if (len == 0)
				break;
		}
		cpos += num_clusters;
	}
out:
	return ret;
}

static int ocfs2_xattr_ibody_get(struct inode *inode,
				 int name_index,
				 const char *name,
				 void *buffer,
				 size_t buffer_size,
				 struct ocfs2_xattr_search *xs)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	struct ocfs2_xattr_value_root *xv;
	size_t size;
	int ret = 0;

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
		return -ENODATA;

	xs->end = (void *)di + inode->i_sb->s_blocksize;
	xs->header = (struct ocfs2_xattr_header *)
			(xs->end - le16_to_cpu(di->i_xattr_inline_size));
	xs->base = (void *)xs->header;
	xs->here = xs->header->xh_entries;

	ret = ocfs2_xattr_find_entry(name_index, name, xs);
	if (ret)
		return ret;
	size = le64_to_cpu(xs->here->xe_value_size);
	if (buffer) {
		if (size > buffer_size)
			return -ERANGE;
		if (ocfs2_xattr_is_local(xs->here)) {
			memcpy(buffer, (void *)xs->base +
			       le16_to_cpu(xs->here->xe_name_offset) +
			       OCFS2_XATTR_SIZE(xs->here->xe_name_len), size);
		} else {
			xv = (struct ocfs2_xattr_value_root *)
				(xs->base + le16_to_cpu(
				 xs->here->xe_name_offset) +
				OCFS2_XATTR_SIZE(xs->here->xe_name_len));
			ret = ocfs2_xattr_get_value_outside(inode, xv,
							    buffer, size);
			if (ret < 0) {
				mlog_errno(ret);
				return ret;
			}
		}
	}

	return size;
}

static int ocfs2_xattr_block_get(struct inode *inode,
				 int name_index,
				 const char *name,
				 void *buffer,
				 size_t buffer_size,
				 struct ocfs2_xattr_search *xs)
{
	struct ocfs2_xattr_block *xb;
	struct ocfs2_xattr_value_root *xv;
	size_t size;
	int ret = -ENODATA, name_offset, name_len, i;
	int uninitialized_var(block_off);

	xs->bucket = ocfs2_xattr_bucket_new(inode);
	if (!xs->bucket) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto cleanup;
	}

	ret = ocfs2_xattr_block_find(inode, name_index, name, xs);
	if (ret) {
		mlog_errno(ret);
		goto cleanup;
	}

	if (xs->not_found) {
		ret = -ENODATA;
		goto cleanup;
	}

	xb = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
	size = le64_to_cpu(xs->here->xe_value_size);
	if (buffer) {
		ret = -ERANGE;
		if (size > buffer_size)
			goto cleanup;

		name_offset = le16_to_cpu(xs->here->xe_name_offset);
		name_len = OCFS2_XATTR_SIZE(xs->here->xe_name_len);
		i = xs->here - xs->header->xh_entries;

		if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
			ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
							bucket_xh(xs->bucket),
							i,
							&block_off,
							&name_offset);
			xs->base = bucket_block(xs->bucket, block_off);
		}
		if (ocfs2_xattr_is_local(xs->here)) {
			memcpy(buffer, (void *)xs->base +
			       name_offset + name_len, size);
		} else {
			xv = (struct ocfs2_xattr_value_root *)
				(xs->base + name_offset + name_len);
			ret = ocfs2_xattr_get_value_outside(inode, xv,
							    buffer, size);
			if (ret < 0) {
				mlog_errno(ret);
				goto cleanup;
			}
		}
	}
	ret = size;
cleanup:
	ocfs2_xattr_bucket_free(xs->bucket);

	brelse(xs->xattr_bh);
	xs->xattr_bh = NULL;
	return ret;
}

int ocfs2_xattr_get_nolock(struct inode *inode,
			   struct buffer_head *di_bh,
			   int name_index,
			   const char *name,
			   void *buffer,
			   size_t buffer_size)
{
	int ret;
	struct ocfs2_dinode *di = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_xattr_search xis = {
		.not_found = -ENODATA,
	};
	struct ocfs2_xattr_search xbs = {
		.not_found = -ENODATA,
	};

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		ret = -ENODATA;

	xis.inode_bh = xbs.inode_bh = di_bh;
	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_read(&oi->ip_xattr_sem);
	ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
				    buffer_size, &xis);
	if (ret == -ENODATA && di->i_xattr_loc)
		ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
					    buffer_size, &xbs);
	up_read(&oi->ip_xattr_sem);

	return ret;
}

/* ocfs2_xattr_get()
 *
 * Copy an extended attribute into the buffer provided.
 * Buffer is NULL to compute the size of buffer required.
 */
static int ocfs2_xattr_get(struct inode *inode,
			   int name_index,
			   const char *name,
			   void *buffer,
			   size_t buffer_size)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}
	ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
				     name, buffer, buffer_size);

	ocfs2_inode_unlock(inode, 0);

	brelse(di_bh);

	return ret;
}

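/*
 * Write value bytes into the clusters already allocated to vb->vb_xv,
 * journalling each data block and zeroing the tail of the last one.
 */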
static int __ocfs2_xattr_set_value_outside(struct inode *inode,
					   handle_t *handle,
					   struct ocfs2_xattr_value_buf *vb,
					   const void *value,
					   int value_len)
{
	int ret = 0, i, cp_len;
	u16 blocksize = inode->i_sb->s_blocksize;
	u32 p_cluster, num_clusters;
	u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len);
	u64 blkno;
	struct buffer_head *bh = NULL;
	unsigned int ext_flags;
	struct ocfs2_xattr_value_root *xv = vb->vb_xv;

	BUG_ON(clusters > le32_to_cpu(xv->xr_clusters));

	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, &xv->xr_list,
					       &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

		blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);

		for (i = 0; i < num_clusters * bpc; i++, blkno++) {
			ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
					       &bh, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_journal_access(handle,
						   INODE_CACHE(inode),
						   bh,
						   OCFS2_JOURNAL_ACCESS_WRITE);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}

			cp_len = value_len > blocksize ? blocksize : value_len;
			memcpy(bh->b_data, value, cp_len);
			value_len -= cp_len;
			value += cp_len;
			if (cp_len < blocksize)
				memset(bh->b_data + cp_len, 0,
				       blocksize - cp_len);

			ret = ocfs2_journal_dirty(handle, bh);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}
			brelse(bh);
			bh = NULL;

			/*
			 * XXX: do we need to empty all the following
			 * blocks in this cluster?
			 */
			if (!value_len)
				break;
		}
		cpos += num_clusters;
	}
out:
	brelse(bh);

	return ret;
}

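/*
 * Undo a partially created entry whose tree root was already written
 * into the inode or block, used when setting the outside value fails.
 */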
static int ocfs2_xattr_cleanup(struct inode *inode,
			       handle_t *handle,
			       struct ocfs2_xattr_info *xi,
			       struct ocfs2_xattr_search *xs,
			       struct ocfs2_xattr_value_buf *vb,
			       size_t offs)
{
	int ret = 0;
	size_t name_len = strlen(xi->name);
	void *val = xs->base + offs;
	size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;

	ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
			    OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	/* Decrease xattr count */
	le16_add_cpu(&xs->header->xh_count, -1);
	/* Remove the xattr entry and tree root which has already been set */
	memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry));
	memset(val, 0, size);

	ret = ocfs2_journal_dirty(handle, vb->vb_bh);
	if (ret < 0)
		mlog_errno(ret);
out:
	return ret;
}

static int ocfs2_xattr_update_entry(struct inode *inode,
				    handle_t *handle,
				    struct ocfs2_xattr_info *xi,
				    struct ocfs2_xattr_search *xs,
				    struct ocfs2_xattr_value_buf *vb,
				    size_t offs)
{
	int ret;

	ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
			    OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	xs->here->xe_name_offset = cpu_to_le16(offs);
	xs->here->xe_value_size = cpu_to_le64(xi->value_len);
	if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE)
		ocfs2_xattr_set_local(xs->here, 1);
	else
		ocfs2_xattr_set_local(xs->here, 0);
	ocfs2_xattr_hash_entry(inode, xs->header, xs->here);

	ret = ocfs2_journal_dirty(handle, vb->vb_bh);
	if (ret < 0)
		mlog_errno(ret);
out:
	return ret;
}

/*
 * ocfs2_xattr_set_value_outside()
 *
 * Set large size value in B tree.
 */
static int ocfs2_xattr_set_value_outside(struct inode *inode,
					 struct ocfs2_xattr_info *xi,
					 struct ocfs2_xattr_search *xs,
					 struct ocfs2_xattr_set_ctxt *ctxt,
					 struct ocfs2_xattr_value_buf *vb,
					 size_t offs)
{
	size_t name_len = strlen(xi->name);
	void *val = xs->base + offs;
	struct ocfs2_xattr_value_root *xv = NULL;
	size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
	int ret = 0;

	memset(val, 0, size);
	memcpy(val, xi->name, name_len);
	xv = (struct ocfs2_xattr_value_root *)
		(val + OCFS2_XATTR_SIZE(name_len));
	xv->xr_clusters = 0;
	xv->xr_last_eb_blk = 0;
	xv->xr_list.l_tree_depth = 0;
	xv->xr_list.l_count = cpu_to_le16(1);
	xv->xr_list.l_next_free_rec = 0;
	vb->vb_xv = xv;

	ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}
	ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}
	ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb,
					      xi->value, xi->value_len);
	if (ret < 0)
		mlog_errno(ret);

	return ret;
}

/*
 * ocfs2_xattr_set_entry_local()
 *
 * Set, replace or remove extended attribute in local.
 */
static void ocfs2_xattr_set_entry_local(struct inode *inode,
					struct ocfs2_xattr_info *xi,
					struct ocfs2_xattr_search *xs,
					struct ocfs2_xattr_entry *last,
					size_t min_offs)
{
	size_t name_len = strlen(xi->name);
	int i;

	if (xi->value && xs->not_found) {
		/* Insert the new xattr entry. */
		le16_add_cpu(&xs->header->xh_count, 1);
		ocfs2_xattr_set_type(last, xi->name_index);
		ocfs2_xattr_set_local(last, 1);
		last->xe_name_len = name_len;
	} else {
		void *first_val;
		void *val;
		size_t offs, size;

		first_val = xs->base + min_offs;
		offs = le16_to_cpu(xs->here->xe_name_offset);
		val = xs->base + offs;

		if (le64_to_cpu(xs->here->xe_value_size) >
		    OCFS2_XATTR_INLINE_SIZE)
			size = OCFS2_XATTR_SIZE(name_len) +
				OCFS2_XATTR_ROOT_SIZE;
		else
			size = OCFS2_XATTR_SIZE(name_len) +
			OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));

		if (xi->value && size == OCFS2_XATTR_SIZE(name_len) +
				 OCFS2_XATTR_SIZE(xi->value_len)) {
			/* The old and the new value have the
			   same size. Just replace the value. */
			ocfs2_xattr_set_local(xs->here, 1);
			xs->here->xe_value_size = cpu_to_le64(xi->value_len);
			/* Clear value bytes. */
			memset(val + OCFS2_XATTR_SIZE(name_len),
			       0,
			       OCFS2_XATTR_SIZE(xi->value_len));
			memcpy(val + OCFS2_XATTR_SIZE(name_len),
			       xi->value,
			       xi->value_len);
			return;
		}
		/* Remove the old name+value. */
		memmove(first_val + size, first_val, val - first_val);
		memset(first_val, 0, size);
		xs->here->xe_name_hash = 0;
		xs->here->xe_name_offset = 0;
		ocfs2_xattr_set_local(xs->here, 1);
		xs->here->xe_value_size = 0;

		min_offs += size;

		/* Adjust all value offsets. */
		last = xs->header->xh_entries;
		for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
			size_t o = le16_to_cpu(last->xe_name_offset);

			if (o < offs)
				last->xe_name_offset = cpu_to_le16(o + size);
			last += 1;
		}

		if (!xi->value) {
			/* Remove the old entry. */
			last -= 1;
			memmove(xs->here, xs->here + 1,
				(void *)last - (void *)xs->here);
			memset(last, 0, sizeof(struct ocfs2_xattr_entry));
			le16_add_cpu(&xs->header->xh_count, -1);
		}
	}
	if (xi->value) {
		/* Insert the new name+value. */
		size_t size = OCFS2_XATTR_SIZE(name_len) +
			      OCFS2_XATTR_SIZE(xi->value_len);
		void *val = xs->base + min_offs - size;

		xs->here->xe_name_offset = cpu_to_le16(min_offs - size);
		memset(val, 0, size);
		memcpy(val, xi->name, name_len);
		memcpy(val + OCFS2_XATTR_SIZE(name_len),
		       xi->value,
		       xi->value_len);
		xs->here->xe_value_size = cpu_to_le64(xi->value_len);
		ocfs2_xattr_set_local(xs->here, 1);
		ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
	}

	return;
}

/*
 * ocfs2_xattr_set_entry()
 *
 * Set extended attribute entry into inode or block.
 *
 * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE,
 * we first insert the tree root (ocfs2_xattr_value_root) with
 * set_entry_local(), then set the value in the B tree with
 * set_value_outside().
 */
static int ocfs2_xattr_set_entry(struct inode *inode,
				 struct ocfs2_xattr_info *xi,
				 struct ocfs2_xattr_search *xs,
				 struct ocfs2_xattr_set_ctxt *ctxt,
				 int flag)
{
	struct ocfs2_xattr_entry *last;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name);
	size_t size_l = 0;
	handle_t *handle = ctxt->handle;
	int free, i, ret;
	struct ocfs2_xattr_info xi_l = {
		.name_index = xi->name_index,
		.name = xi->name,
		.value = xi->value,
		.value_len = xi->value_len,
	};
	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = xs->xattr_bh,
		.vb_access = ocfs2_journal_access_di,
	};

	if (!(flag & OCFS2_INLINE_XATTR_FL)) {
		BUG_ON(xs->xattr_bh == xs->inode_bh);
		vb.vb_access = ocfs2_journal_access_xb;
	} else
		BUG_ON(xs->xattr_bh != xs->inode_bh);

	/* Compute min_offs, last and free space. */
	last = xs->header->xh_entries;

	for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
		size_t offs = le16_to_cpu(last->xe_name_offset);
		if (offs < min_offs)
			min_offs = offs;
		last += 1;
	}

	free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
	if (free < 0)
		return -EIO;

	if (!xs->not_found) {
		size_t size = 0;
		if (ocfs2_xattr_is_local(xs->here))
			size = OCFS2_XATTR_SIZE(name_len) +
			OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
		else
			size = OCFS2_XATTR_SIZE(name_len) +
				OCFS2_XATTR_ROOT_SIZE;
		free += (size + sizeof(struct ocfs2_xattr_entry));
	}
	/* Check free space in inode or block */
	if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
		if (free < sizeof(struct ocfs2_xattr_entry) +
			   OCFS2_XATTR_SIZE(name_len) +
			   OCFS2_XATTR_ROOT_SIZE) {
			ret = -ENOSPC;
			goto out;
		}
		size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
		xi_l.value = (void *)&def_xv;
		xi_l.value_len = OCFS2_XATTR_ROOT_SIZE;
	} else if (xi->value) {
		if (free < sizeof(struct ocfs2_xattr_entry) +
			   OCFS2_XATTR_SIZE(name_len) +
			   OCFS2_XATTR_SIZE(xi->value_len)) {
			ret = -ENOSPC;
			goto out;
		}
	}

	if (!xs->not_found) {
		/* For existing extended attribute */
		size_t size = OCFS2_XATTR_SIZE(name_len) +
		OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
		size_t offs = le16_to_cpu(xs->here->xe_name_offset);
		void *val = xs->base + offs;

		if (ocfs2_xattr_is_local(xs->here) && size == size_l) {
			/* Replace existing local xattr with tree root */
			ret = ocfs2_xattr_set_value_outside(inode, xi, xs,
							    ctxt, &vb, offs);
			if (ret < 0)
				mlog_errno(ret);
			goto out;
		} else if (!ocfs2_xattr_is_local(xs->here)) {
			/* For existing xattr which has value outside */
			vb.vb_xv = (struct ocfs2_xattr_value_root *)
				(val + OCFS2_XATTR_SIZE(name_len));

			if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
				/*
				 * If the new value also needs to be set
				 * outside, first truncate the old value to
				 * the new size, then set the new value with
				 * set_value_outside().
				 */
				ret = ocfs2_xattr_value_truncate(inode,
								 &vb,
								 xi->value_len,
								 ctxt);
				if (ret < 0) {
					mlog_errno(ret);
					goto out;
				}

				ret = ocfs2_xattr_update_entry(inode,
							       handle,
							       xi,
							       xs,
							       &vb,
							       offs);
				if (ret < 0) {
					mlog_errno(ret);
					goto out;
				}

				ret = __ocfs2_xattr_set_value_outside(inode,
								handle,
								&vb,
								xi->value,
								xi->value_len);
				if (ret < 0)
					mlog_errno(ret);
				goto out;
			} else {
				/*
				 * If the new value needs to be set in local,
				 * just truncate the old value to zero.
				 */
				ret = ocfs2_xattr_value_truncate(inode,
								 &vb,
								 0,
								 ctxt);
				if (ret < 0)
					mlog_errno(ret);
			}
		}
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (!(flag & OCFS2_INLINE_XATTR_FL)) {
		ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Set value in local, include set tree root in local.
	 * This is the first step for value size > INLINE_SIZE.
	 */
	ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs);

	if (!(flag & OCFS2_INLINE_XATTR_FL)) {
		ret = ocfs2_journal_dirty(handle, xs->xattr_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) &&
	    (flag & OCFS2_INLINE_XATTR_FL)) {
		struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
		unsigned int xattrsize = osb->s_xattr_inline_size;

		/*
		 * Adjust extent record count or inline data size
		 * to reserve space for extended attribute.
		 */
		if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
			struct ocfs2_inline_data *idata = &di->id2.i_data;
			le16_add_cpu(&idata->id_count, -xattrsize);
		} else if (!(ocfs2_inode_is_fast_symlink(inode))) {
			struct ocfs2_extent_list *el = &di->id2.i_list;
			le16_add_cpu(&el->l_count, -(xattrsize /
					sizeof(struct ocfs2_extent_rec)));
		}
		di->i_xattr_inline_size = cpu_to_le16(xattrsize);
	}
	/* Update xattr flag */
	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= flag;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	spin_unlock(&oi->ip_lock);

	ret = ocfs2_journal_dirty(handle, xs->inode_bh);
	if (ret < 0)
		mlog_errno(ret);

	if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
		/*
		 * Set value outside in B tree.
		 * This is the second step for value size > INLINE_SIZE.
		 */
		size_t offs = le16_to_cpu(xs->here->xe_name_offset);
		ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt,
						    &vb, offs);
		if (ret < 0) {
			int ret2;

			mlog_errno(ret);
			/*
			 * If set value outside failed, we have to clean
			 * the junk tree root we have already set in local.
			 */
			ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle,
						   xi, xs, &vb, offs);
			if (ret2 < 0)
				mlog_errno(ret2);
		}
	}
out:
	return ret;
}

/*
 * In xattr remove, if it is stored outside and refcounted, we may have
 * the chance to split the refcount tree. So need the allocators.
 */
static int ocfs2_lock_xattr_remove_allocators(struct inode *inode,
					struct ocfs2_xattr_value_root *xv,
					struct ocfs2_caching_info *ref_ci,
					struct buffer_head *ref_root_bh,
					struct ocfs2_alloc_context **meta_ac,
					int *ref_credits)
{
	int ret, meta_add = 0;
	u32 p_cluster, num_clusters;
	unsigned int ext_flags;

	*ref_credits = 0;
	ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
				       &num_clusters,
				       &xv->xr_list,
				       &ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
						 ref_root_bh, xv,
						 &meta_add, ref_credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
						meta_add, meta_ac);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}

static int ocfs2_remove_value_outside(struct inode *inode,
				      struct ocfs2_xattr_value_buf *vb,
				      struct ocfs2_xattr_header *header,
				      struct ocfs2_caching_info *ref_ci,
				      struct buffer_head *ref_root_bh)
{
	int ret = 0, i, ref_credits;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
	void *val;

	ocfs2_init_dealloc_ctxt(&ctxt.dealloc);

	for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
		struct ocfs2_xattr_entry *entry = &header->xh_entries[i];

		if (ocfs2_xattr_is_local(entry))
			continue;

		val = (void *)header +
			le16_to_cpu(entry->xe_name_offset);
		vb->vb_xv = (struct ocfs2_xattr_value_root *)
			(val + OCFS2_XATTR_SIZE(entry->xe_name_len));

		ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv,
							 ref_ci, ref_root_bh,
							 &ctxt.meta_ac,
							 &ref_credits);

		ctxt.handle = ocfs2_start_trans(osb, ref_credits +
					ocfs2_remove_extent_credits(osb->sb));
		if (IS_ERR(ctxt.handle)) {
			ret = PTR_ERR(ctxt.handle);
			mlog_errno(ret);
			break;
		}

		ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}

		ocfs2_commit_trans(osb, ctxt.handle);
		if (ctxt.meta_ac) {
			ocfs2_free_alloc_context(ctxt.meta_ac);
			ctxt.meta_ac = NULL;
		}
	}

	if (ctxt.meta_ac)
		ocfs2_free_alloc_context(ctxt.meta_ac);
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &ctxt.dealloc);
	return ret;
}

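/*
 * Release the externally stored values referenced from the inline xattr
 * area of the inode before the inline area itself is discarded.
 */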
static int ocfs2_xattr_ibody_remove(struct inode *inode,
				    struct buffer_head *di_bh,
				    struct ocfs2_caching_info *ref_ci,
				    struct buffer_head *ref_root_bh)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_xattr_header *header;
	int ret;
	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = di_bh,
		.vb_access = ocfs2_journal_access_di,
	};

	header = (struct ocfs2_xattr_header *)
		 ((void *)di + inode->i_sb->s_blocksize -
		 le16_to_cpu(di->i_xattr_inline_size));

	ret = ocfs2_remove_value_outside(inode, &vb, header,
					 ref_ci, ref_root_bh);

	return ret;
}

struct ocfs2_rm_xattr_bucket_para {
	struct ocfs2_caching_info *ref_ci;
	struct buffer_head *ref_root_bh;
};

static int ocfs2_xattr_block_remove(struct inode *inode,
				    struct buffer_head *blk_bh,
				    struct ocfs2_caching_info *ref_ci,
				    struct buffer_head *ref_root_bh)
{
	struct ocfs2_xattr_block *xb;
	int ret = 0;
	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = blk_bh,
		.vb_access = ocfs2_journal_access_xb,
	};
	struct ocfs2_rm_xattr_bucket_para args = {
		.ref_ci = ref_ci,
		.ref_root_bh = ref_root_bh,
	};

	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
	if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
		struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header);
		ret = ocfs2_remove_value_outside(inode, &vb, header,
						 ref_ci, ref_root_bh);
	} else
		ret = ocfs2_iterate_xattr_index_block(inode,
						      blk_bh,
						      ocfs2_rm_xattr_cluster,
						      &args);

	return ret;
}

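/*
 * Tear down an external xattr block: remove every value it references,
 * then return the block itself to its suballocator.
 */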
static int ocfs2_xattr_free_block(struct inode *inode,
				  u64 block,
				  struct ocfs2_caching_info *ref_ci,
				  struct buffer_head *ref_root_bh)
{
	struct inode *xb_alloc_inode;
	struct buffer_head *xb_alloc_bh = NULL;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_xattr_block *xb;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	int ret = 0;
	u64 blk, bg_blkno;
	u16 bit;

	ret = ocfs2_read_xattr_block(inode, block, &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
	blk = le64_to_cpu(xb->xb_blkno);
	bit = le16_to_cpu(xb->xb_suballoc_bit);
	bg_blkno = ocfs2_which_suballoc_group(blk, bit);

	xb_alloc_inode = ocfs2_get_system_file_inode(osb,
				EXTENT_ALLOC_SYSTEM_INODE,
				le16_to_cpu(xb->xb_suballoc_slot));
	if (!xb_alloc_inode) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	mutex_lock(&xb_alloc_inode->i_mutex);

	ret = ocfs2_inode_lock(xb_alloc_inode, &xb_alloc_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_free_suballoc_bits(handle, xb_alloc_inode, xb_alloc_bh,
				       bit, bg_blkno, 1);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, handle);
out_unlock:
	ocfs2_inode_unlock(xb_alloc_inode, 1);
	brelse(xb_alloc_bh);
out_mutex:
	mutex_unlock(&xb_alloc_inode->i_mutex);
	iput(xb_alloc_inode);
out:
	brelse(blk_bh);
	return ret;
}

/*
 * ocfs2_xattr_remove()
 *
 * Free extended attribute resources associated with this inode.
 */
int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_caching_info *ref_ci = NULL;
	handle_t *handle;
	int ret;

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return 0;

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
		ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb),
					       le64_to_cpu(di->i_refcount_loc),
					       1, &ref_tree, &ref_root_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		ref_ci = &ref_tree->rf_ci;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
		ret = ocfs2_xattr_ibody_remove(inode, di_bh,
					       ref_ci, ref_root_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (di->i_xattr_loc) {
		ret = ocfs2_xattr_free_block(inode,
					     le64_to_cpu(di->i_xattr_loc),
					     ref_ci, ref_root_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	di->i_xattr_loc = 0;

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL);
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	spin_unlock(&oi->ip_lock);

	ret = ocfs2_journal_dirty(handle, di_bh);
	if (ret < 0)
		mlog_errno(ret);
out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1);
	brelse(ref_root_bh);
	return ret;
}

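/*
 * Check whether the inode body can still host the inline xattr area,
 * based on how much of the id2 union (inline data, fast symlink target
 * or extent list) is currently in use.
 */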
2075 static int ocfs2_xattr_has_space_inline(struct inode *inode,
2076 struct ocfs2_dinode *di)
2078 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2079 unsigned int xattrsize = OCFS2_SB(inode->i_sb)->s_xattr_inline_size;
2080 int free;
2082 if (xattrsize < OCFS2_MIN_XATTR_INLINE_SIZE)
2083 return 0;
2085 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2086 struct ocfs2_inline_data *idata = &di->id2.i_data;
2087 free = le16_to_cpu(idata->id_count) - le64_to_cpu(di->i_size);
2088 } else if (ocfs2_inode_is_fast_symlink(inode)) {
2089 free = ocfs2_fast_symlink_chars(inode->i_sb) -
2090 le64_to_cpu(di->i_size);
2091 } else {
2092 struct ocfs2_extent_list *el = &di->id2.i_list;
2093 free = (le16_to_cpu(el->l_count) -
2094 le16_to_cpu(el->l_next_free_rec)) *
2095 sizeof(struct ocfs2_extent_rec);
2097 if (free >= xattrsize)
2098 return 1;
2100 return 0;
2104 * ocfs2_xattr_ibody_find()
2106 * Find extended attribute in inode block and
2107 * fill search info into struct ocfs2_xattr_search.
2109 static int ocfs2_xattr_ibody_find(struct inode *inode,
2110 int name_index,
2111 const char *name,
2112 struct ocfs2_xattr_search *xs)
2114 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2115 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2116 int ret;
2117 int has_space = 0;
2119 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
2120 return 0;
2122 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
2123 down_read(&oi->ip_alloc_sem);
2124 has_space = ocfs2_xattr_has_space_inline(inode, di);
2125 up_read(&oi->ip_alloc_sem);
2126 if (!has_space)
2127 return 0;
2130 xs->xattr_bh = xs->inode_bh;
2131 xs->end = (void *)di + inode->i_sb->s_blocksize;
2132 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)
2133 xs->header = (struct ocfs2_xattr_header *)
2134 (xs->end - le16_to_cpu(di->i_xattr_inline_size));
2135 else
2136 xs->header = (struct ocfs2_xattr_header *)
2137 (xs->end - OCFS2_SB(inode->i_sb)->s_xattr_inline_size);
2138 xs->base = (void *)xs->header;
2139 xs->here = xs->header->xh_entries;
2141 /* Find the named attribute. */
2142 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
2143 ret = ocfs2_xattr_find_entry(name_index, name, xs);
2144 if (ret && ret != -ENODATA)
2145 return ret;
2146 xs->not_found = ret;
2149 return 0;
2153 * ocfs2_xattr_ibody_set()
2155 * Set, replace or remove an extended attribute into inode block.
2158 static int ocfs2_xattr_ibody_set(struct inode *inode,
2159 struct ocfs2_xattr_info *xi,
2160 struct ocfs2_xattr_search *xs,
2161 struct ocfs2_xattr_set_ctxt *ctxt)
2163 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2164 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2165 int ret;
2167 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
2168 return -ENOSPC;
2170 down_write(&oi->ip_alloc_sem);
2171 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
2172 if (!ocfs2_xattr_has_space_inline(inode, di)) {
2173 ret = -ENOSPC;
2174 goto out;
2178 ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
2179 (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL));
2180 out:
2181 up_write(&oi->ip_alloc_sem);
2183 return ret;
2187 * ocfs2_xattr_block_find()
2189 * Find an extended attribute in an external block and
2190 * fill the search info into struct ocfs2_xattr_search.
2192 static int ocfs2_xattr_block_find(struct inode *inode,
2193 int name_index,
2194 const char *name,
2195 struct ocfs2_xattr_search *xs)
2197 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2198 struct buffer_head *blk_bh = NULL;
2199 struct ocfs2_xattr_block *xb;
2200 int ret = 0;
2202 if (!di->i_xattr_loc)
2203 return ret;
2205 ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
2206 &blk_bh);
2207 if (ret < 0) {
2208 mlog_errno(ret);
2209 return ret;
2212 xs->xattr_bh = blk_bh;
2213 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
2215 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
2216 xs->header = &xb->xb_attrs.xb_header;
2217 xs->base = (void *)xs->header;
2218 xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
2219 xs->here = xs->header->xh_entries;
2221 ret = ocfs2_xattr_find_entry(name_index, name, xs);
2222 } else
2223 ret = ocfs2_xattr_index_block_find(inode, blk_bh,
2224 name_index,
2225 name, xs);
2227 if (ret && ret != -ENODATA) {
2228 xs->xattr_bh = NULL;
2229 goto cleanup;
2231 xs->not_found = ret;
2232 return 0;
2233 cleanup:
2234 brelse(blk_bh);
2236 return ret;
2239 static int ocfs2_create_xattr_block(handle_t *handle,
2240 struct inode *inode,
2241 struct buffer_head *inode_bh,
2242 struct ocfs2_alloc_context *meta_ac,
2243 struct buffer_head **ret_bh,
2244 int indexed)
2246 int ret;
2247 u16 suballoc_bit_start;
2248 u32 num_got;
2249 u64 first_blkno;
2250 struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data;
2251 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2252 struct buffer_head *new_bh = NULL;
2253 struct ocfs2_xattr_block *xblk;
2255 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh,
2256 OCFS2_JOURNAL_ACCESS_CREATE);
2257 if (ret < 0) {
2258 mlog_errno(ret);
2259 goto end;
2262 ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
2263 &suballoc_bit_start, &num_got,
2264 &first_blkno);
2265 if (ret < 0) {
2266 mlog_errno(ret);
2267 goto end;
2270 new_bh = sb_getblk(inode->i_sb, first_blkno);
2271 ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2273 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode),
2274 new_bh,
2275 OCFS2_JOURNAL_ACCESS_CREATE);
2276 if (ret < 0) {
2277 mlog_errno(ret);
2278 goto end;
2281 /* Initialize ocfs2_xattr_block */
2282 xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
2283 memset(xblk, 0, inode->i_sb->s_blocksize);
2284 strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
2285 xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num);
2286 xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
2287 xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
2288 xblk->xb_blkno = cpu_to_le64(first_blkno);
2290 if (indexed) {
2291 struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
2292 xr->xt_clusters = cpu_to_le32(1);
2293 xr->xt_last_eb_blk = 0;
2294 xr->xt_list.l_tree_depth = 0;
2295 xr->xt_list.l_count = cpu_to_le16(
2296 ocfs2_xattr_recs_per_xb(inode->i_sb));
2297 xr->xt_list.l_next_free_rec = cpu_to_le16(1);
2298 xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
2301 ret = ocfs2_journal_dirty(handle, new_bh);
2302 if (ret < 0) {
2303 mlog_errno(ret);
2304 goto end;
2306 di->i_xattr_loc = cpu_to_le64(first_blkno);
2307 ocfs2_journal_dirty(handle, inode_bh);
2309 *ret_bh = new_bh;
2310 new_bh = NULL;
2312 end:
2313 brelse(new_bh);
2314 return ret;
2318 * ocfs2_xattr_block_set()
2320 * Set, replace or remove an extended attribute in an external block.
2323 static int ocfs2_xattr_block_set(struct inode *inode,
2324 struct ocfs2_xattr_info *xi,
2325 struct ocfs2_xattr_search *xs,
2326 struct ocfs2_xattr_set_ctxt *ctxt)
2328 struct buffer_head *new_bh = NULL;
2329 handle_t *handle = ctxt->handle;
2330 struct ocfs2_xattr_block *xblk = NULL;
2331 int ret;
2333 if (!xs->xattr_bh) {
2334 ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh,
2335 ctxt->meta_ac, &new_bh, 0);
2336 if (ret) {
2337 mlog_errno(ret);
2338 goto end;
2341 xs->xattr_bh = new_bh;
2342 xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
2343 xs->header = &xblk->xb_attrs.xb_header;
2344 xs->base = (void *)xs->header;
2345 xs->end = (void *)xblk + inode->i_sb->s_blocksize;
2346 xs->here = xs->header->xh_entries;
2347 } else
2348 xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
2350 if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
2351 /* Set extended attribute into external block */
2352 ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
2353 OCFS2_HAS_XATTR_FL);
2354 if (!ret || ret != -ENOSPC)
2355 goto end;
2357 ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
2358 if (ret)
2359 goto end;
2362 ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
2364 end:
2366 return ret;
2369 /* Check whether the new xattr can be inserted into the inode. */
2370 static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
2371 struct ocfs2_xattr_info *xi,
2372 struct ocfs2_xattr_search *xs)
2374 u64 value_size;
2375 struct ocfs2_xattr_entry *last;
2376 int free, i;
2377 size_t min_offs = xs->end - xs->base;
2379 if (!xs->header)
2380 return 0;
2382 last = xs->header->xh_entries;
2384 for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
2385 size_t offs = le16_to_cpu(last->xe_name_offset);
2386 if (offs < min_offs)
2387 min_offs = offs;
2388 last += 1;
2391 free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
2392 if (free < 0)
2393 return 0;
2395 BUG_ON(!xs->not_found);
2397 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
2398 value_size = OCFS2_XATTR_ROOT_SIZE;
2399 else
2400 value_size = OCFS2_XATTR_SIZE(xi->value_len);
2402 if (free >= sizeof(struct ocfs2_xattr_entry) +
2403 OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size)
2404 return 1;
2406 return 0;
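/*
 * Illustrative example (hypothetical numbers; assumes a 16-byte
 * struct ocfs2_xattr_entry and that OCFS2_XATTR_SIZE() rounds up to a
 * multiple of 4): storing the name "foo" with a 40-byte value needs
 * 16 + OCFS2_XATTR_SIZE(3) + OCFS2_XATTR_SIZE(40) = 16 + 4 + 40 = 60
 * free bytes.  A value larger than OCFS2_XATTR_INLINE_SIZE (80) is
 * charged only OCFS2_XATTR_ROOT_SIZE here, since the data itself will
 * live in a separate value tree.
 */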
2409 static int ocfs2_calc_xattr_set_need(struct inode *inode,
2410 struct ocfs2_dinode *di,
2411 struct ocfs2_xattr_info *xi,
2412 struct ocfs2_xattr_search *xis,
2413 struct ocfs2_xattr_search *xbs,
2414 int *clusters_need,
2415 int *meta_need,
2416 int *credits_need)
2418 int ret = 0, old_in_xb = 0;
2419 int clusters_add = 0, meta_add = 0, credits = 0;
2420 struct buffer_head *bh = NULL;
2421 struct ocfs2_xattr_block *xb = NULL;
2422 struct ocfs2_xattr_entry *xe = NULL;
2423 struct ocfs2_xattr_value_root *xv = NULL;
2424 char *base = NULL;
2425 int name_offset, name_len = 0;
2426 u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
2427 xi->value_len);
2428 u64 value_size;
2431 * Calculate the clusters we need to write.
2432 * No matter whether we replace an old one or add a new one,
2433 * we need this for writing.
2435 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
2436 credits += new_clusters *
2437 ocfs2_clusters_to_blocks(inode->i_sb, 1);
2439 if (xis->not_found && xbs->not_found) {
2440 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
2442 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
2443 clusters_add += new_clusters;
2444 credits += ocfs2_calc_extend_credits(inode->i_sb,
2445 &def_xv.xv.xr_list,
2446 new_clusters);
2449 goto meta_guess;
2452 if (!xis->not_found) {
2453 xe = xis->here;
2454 name_offset = le16_to_cpu(xe->xe_name_offset);
2455 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
2456 base = xis->base;
2457 credits += OCFS2_INODE_UPDATE_CREDITS;
2458 } else {
2459 int i, block_off = 0;
2460 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
2461 xe = xbs->here;
2462 name_offset = le16_to_cpu(xe->xe_name_offset);
2463 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
2464 i = xbs->here - xbs->header->xh_entries;
2465 old_in_xb = 1;
2467 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
2468 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
2469 bucket_xh(xbs->bucket),
2470 i, &block_off,
2471 &name_offset);
2472 base = bucket_block(xbs->bucket, block_off);
2473 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
2474 } else {
2475 base = xbs->base;
2476 credits += OCFS2_XATTR_BLOCK_UPDATE_CREDITS;
2481 * Deleting an xattr doesn't need metadata or cluster allocation,
2482 * so just calculate the credits and return.
2484 * The credits for removing the value tree will be extended
2485 * by ocfs2_remove_extent itself.
2487 if (!xi->value) {
2488 if (!ocfs2_xattr_is_local(xe))
2489 credits += ocfs2_remove_extent_credits(inode->i_sb);
2491 goto out;
2494 /* do cluster allocation guess first. */
2495 value_size = le64_to_cpu(xe->xe_value_size);
2497 if (old_in_xb) {
2499 * In xattr set, we always try to set the xe in inode first,
2500 * so if it can be inserted into inode successfully, the old
2501 * one will be removed from the xattr block, and this xattr
2502 * will be inserted into the inode as a new xattr.
2504 if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) {
2505 clusters_add += new_clusters;
2506 credits += ocfs2_remove_extent_credits(inode->i_sb) +
2507 OCFS2_INODE_UPDATE_CREDITS;
2508 if (!ocfs2_xattr_is_local(xe))
2509 credits += ocfs2_calc_extend_credits(
2510 inode->i_sb,
2511 &def_xv.xv.xr_list,
2512 new_clusters);
2513 goto out;
2517 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
2518 /* the new values will be stored outside. */
2519 u32 old_clusters = 0;
2521 if (!ocfs2_xattr_is_local(xe)) {
2522 old_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
2523 value_size);
2524 xv = (struct ocfs2_xattr_value_root *)
2525 (base + name_offset + name_len);
2526 value_size = OCFS2_XATTR_ROOT_SIZE;
2527 } else
2528 xv = &def_xv.xv;
2530 if (old_clusters >= new_clusters) {
2531 credits += ocfs2_remove_extent_credits(inode->i_sb);
2532 goto out;
2533 } else {
2534 meta_add += ocfs2_extend_meta_needed(&xv->xr_list);
2535 clusters_add += new_clusters - old_clusters;
2536 credits += ocfs2_calc_extend_credits(inode->i_sb,
2537 &xv->xr_list,
2538 new_clusters -
2539 old_clusters);
2540 if (value_size >= OCFS2_XATTR_ROOT_SIZE)
2541 goto out;
2543 } else {
2545 * Now the new value will be stored inline. So if the new
2546 * value is smaller than the size of the value root or the old
2547 * value, we don't need any allocation; otherwise we have
2548 * to guess the metadata allocation.
2550 if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) ||
2551 (!ocfs2_xattr_is_local(xe) &&
2552 OCFS2_XATTR_ROOT_SIZE >= xi->value_len))
2553 goto out;
2556 meta_guess:
2557 /* calculate metadata allocation. */
2558 if (di->i_xattr_loc) {
2559 if (!xbs->xattr_bh) {
2560 ret = ocfs2_read_xattr_block(inode,
2561 le64_to_cpu(di->i_xattr_loc),
2562 &bh);
2563 if (ret) {
2564 mlog_errno(ret);
2565 goto out;
2568 xb = (struct ocfs2_xattr_block *)bh->b_data;
2569 } else
2570 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
2573 * If there is already an xattr tree, good, we can calculate
2574 * like other b-trees. Otherwise we may have to
2575 * create a tree; the credit calculation is borrowed from
2576 * ocfs2_calc_extend_credits with root_el = NULL. And the
2577 * new tree will be cluster based, so no metadata is needed.
2579 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
2580 struct ocfs2_extent_list *el =
2581 &xb->xb_attrs.xb_root.xt_list;
2582 meta_add += ocfs2_extend_meta_needed(el);
2583 credits += ocfs2_calc_extend_credits(inode->i_sb,
2584 el, 1);
2585 } else
2586 credits += OCFS2_SUBALLOC_ALLOC + 1;
2589 * This cluster will be used either for a new bucket or for a
2590 * new xattr block.
2591 * If the cluster size is the same as the bucket size, one
2592 * more cluster is needed since we may also need to extend the
2593 * bucket.
2595 clusters_add += 1;
2596 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
2597 if (OCFS2_XATTR_BUCKET_SIZE ==
2598 OCFS2_SB(inode->i_sb)->s_clustersize) {
2599 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
2600 clusters_add += 1;
2602 } else {
2603 meta_add += 1;
2604 credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
2606 out:
2607 if (clusters_need)
2608 *clusters_need = clusters_add;
2609 if (meta_need)
2610 *meta_need = meta_add;
2611 if (credits_need)
2612 *credits_need = credits;
2613 brelse(bh);
2614 return ret;
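/*
 * In short, the estimate above produces three numbers: clusters_need
 * (data clusters for an outside value and/or a new bucket), meta_need
 * (extent-tree blocks or a new xattr block) and credits_need (journal
 * credits for every buffer that may be dirtied).  The guess may be
 * generous; the caller frees any unused allocation contexts afterwards
 * (see ocfs2_xattr_set()).
 */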
2617 static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
2618 struct ocfs2_dinode *di,
2619 struct ocfs2_xattr_info *xi,
2620 struct ocfs2_xattr_search *xis,
2621 struct ocfs2_xattr_search *xbs,
2622 struct ocfs2_xattr_set_ctxt *ctxt,
2623 int extra_meta,
2624 int *credits)
2626 int clusters_add, meta_add, ret;
2627 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2629 memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt));
2631 ocfs2_init_dealloc_ctxt(&ctxt->dealloc);
2633 ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs,
2634 &clusters_add, &meta_add, credits);
2635 if (ret) {
2636 mlog_errno(ret);
2637 return ret;
2640 meta_add += extra_meta;
2641 mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
2642 "credits = %d\n", xi->name, meta_add, clusters_add, *credits);
2644 if (meta_add) {
2645 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
2646 &ctxt->meta_ac);
2647 if (ret) {
2648 mlog_errno(ret);
2649 goto out;
2653 if (clusters_add) {
2654 ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac);
2655 if (ret)
2656 mlog_errno(ret);
2658 out:
2659 if (ret) {
2660 if (ctxt->meta_ac) {
2661 ocfs2_free_alloc_context(ctxt->meta_ac);
2662 ctxt->meta_ac = NULL;
2666 * We cannot have an error and a non-NULL ctxt->data_ac.
2670 return ret;
2673 static int __ocfs2_xattr_set_handle(struct inode *inode,
2674 struct ocfs2_dinode *di,
2675 struct ocfs2_xattr_info *xi,
2676 struct ocfs2_xattr_search *xis,
2677 struct ocfs2_xattr_search *xbs,
2678 struct ocfs2_xattr_set_ctxt *ctxt)
2680 int ret = 0, credits, old_found;
2682 if (!xi->value) {
2683 /* Remove existing extended attribute */
2684 if (!xis->not_found)
2685 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
2686 else if (!xbs->not_found)
2687 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
2688 } else {
2689 /* We always try to set extended attribute into inode first*/
2690 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
2691 if (!ret && !xbs->not_found) {
2693 * If that succeeds and the extended attribute exists in an
2694 * external block, then we will remove it.
2696 xi->value = NULL;
2697 xi->value_len = 0;
2699 old_found = xis->not_found;
2700 xis->not_found = -ENODATA;
2701 ret = ocfs2_calc_xattr_set_need(inode,
2704 xis,
2705 xbs,
2706 NULL,
2707 NULL,
2708 &credits);
2709 xis->not_found = old_found;
2710 if (ret) {
2711 mlog_errno(ret);
2712 goto out;
2715 ret = ocfs2_extend_trans(ctxt->handle, credits +
2716 ctxt->handle->h_buffer_credits);
2717 if (ret) {
2718 mlog_errno(ret);
2719 goto out;
2721 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
2722 } else if (ret == -ENOSPC) {
2723 if (di->i_xattr_loc && !xbs->xattr_bh) {
2724 ret = ocfs2_xattr_block_find(inode,
2725 xi->name_index,
2726 xi->name, xbs);
2727 if (ret)
2728 goto out;
2730 old_found = xis->not_found;
2731 xis->not_found = -ENODATA;
2732 ret = ocfs2_calc_xattr_set_need(inode,
2735 xis,
2736 xbs,
2737 NULL,
2738 NULL,
2739 &credits);
2740 xis->not_found = old_found;
2741 if (ret) {
2742 mlog_errno(ret);
2743 goto out;
2746 ret = ocfs2_extend_trans(ctxt->handle, credits +
2747 ctxt->handle->h_buffer_credits);
2748 if (ret) {
2749 mlog_errno(ret);
2750 goto out;
2754 * If there is no space in the inode, we will set the extended
2755 * attribute in an external block.
2757 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
2758 if (ret)
2759 goto out;
2760 if (!xis->not_found) {
2762 * If that succeeds and the extended attribute
2763 * exists in the inode, we will remove it.
2765 xi->value = NULL;
2766 xi->value_len = 0;
2767 xbs->not_found = -ENODATA;
2768 ret = ocfs2_calc_xattr_set_need(inode,
2771 xis,
2772 xbs,
2773 NULL,
2774 NULL,
2775 &credits);
2776 if (ret) {
2777 mlog_errno(ret);
2778 goto out;
2781 ret = ocfs2_extend_trans(ctxt->handle, credits +
2782 ctxt->handle->h_buffer_credits);
2783 if (ret) {
2784 mlog_errno(ret);
2785 goto out;
2787 ret = ocfs2_xattr_ibody_set(inode, xi,
2788 xis, ctxt);
2793 if (!ret) {
2794 /* Update inode ctime. */
2795 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
2796 xis->inode_bh,
2797 OCFS2_JOURNAL_ACCESS_WRITE);
2798 if (ret) {
2799 mlog_errno(ret);
2800 goto out;
2803 inode->i_ctime = CURRENT_TIME;
2804 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
2805 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
2806 ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
2808 out:
2809 return ret;
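/*
 * In short, the logic above always tries the inode body first.  If that
 * succeeds and a stale copy of the same name still exists in the
 * external block, the stale copy is removed by re-running the block set
 * with value == NULL.  If the inode body returns -ENOSPC, the attribute
 * is stored in the external block instead, and any stale in-inode copy
 * is removed the same way.  Before each follow-up step the handle is
 * extended with freshly computed credits.
 */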
2813 * This function is only called during inode creation to initialize
2814 * the security/ACL xattrs of the new inode.
2815 * All transaction credits have been reserved in mknod.
2817 int ocfs2_xattr_set_handle(handle_t *handle,
2818 struct inode *inode,
2819 struct buffer_head *di_bh,
2820 int name_index,
2821 const char *name,
2822 const void *value,
2823 size_t value_len,
2824 int flags,
2825 struct ocfs2_alloc_context *meta_ac,
2826 struct ocfs2_alloc_context *data_ac)
2828 struct ocfs2_dinode *di;
2829 int ret;
2831 struct ocfs2_xattr_info xi = {
2832 .name_index = name_index,
2833 .name = name,
2834 .value = value,
2835 .value_len = value_len,
2838 struct ocfs2_xattr_search xis = {
2839 .not_found = -ENODATA,
2842 struct ocfs2_xattr_search xbs = {
2843 .not_found = -ENODATA,
2846 struct ocfs2_xattr_set_ctxt ctxt = {
2847 .handle = handle,
2848 .meta_ac = meta_ac,
2849 .data_ac = data_ac,
2852 if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
2853 return -EOPNOTSUPP;
2856 * In extreme situations we may need an xattr bucket when the
2857 * block size is too small. And we have already reserved
2858 * the credits for the bucket in mknod.
2860 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) {
2861 xbs.bucket = ocfs2_xattr_bucket_new(inode);
2862 if (!xbs.bucket) {
2863 mlog_errno(-ENOMEM);
2864 return -ENOMEM;
2868 xis.inode_bh = xbs.inode_bh = di_bh;
2869 di = (struct ocfs2_dinode *)di_bh->b_data;
2871 down_write(&OCFS2_I(inode)->ip_xattr_sem);
2873 ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
2874 if (ret)
2875 goto cleanup;
2876 if (xis.not_found) {
2877 ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
2878 if (ret)
2879 goto cleanup;
2882 ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
2884 cleanup:
2885 up_write(&OCFS2_I(inode)->ip_xattr_sem);
2886 brelse(xbs.xattr_bh);
2887 ocfs2_xattr_bucket_free(xbs.bucket);
2889 return ret;
2893 * ocfs2_xattr_set()
2895 * Set, replace or remove an extended attribute for this inode.
2896 * value is NULL to remove an existing extended attribute, else either
2897 * create or replace an extended attribute.
2899 int ocfs2_xattr_set(struct inode *inode,
2900 int name_index,
2901 const char *name,
2902 const void *value,
2903 size_t value_len,
2904 int flags)
2906 struct buffer_head *di_bh = NULL;
2907 struct ocfs2_dinode *di;
2908 int ret, credits, ref_meta = 0, ref_credits = 0;
2909 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2910 struct inode *tl_inode = osb->osb_tl_inode;
2911 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
2912 struct ocfs2_refcount_tree *ref_tree = NULL;
2914 struct ocfs2_xattr_info xi = {
2915 .name_index = name_index,
2916 .name = name,
2917 .value = value,
2918 .value_len = value_len,
2921 struct ocfs2_xattr_search xis = {
2922 .not_found = -ENODATA,
2925 struct ocfs2_xattr_search xbs = {
2926 .not_found = -ENODATA,
2929 if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
2930 return -EOPNOTSUPP;
2933 * Only xbs will be used on indexed trees. xis doesn't need a
2934 * bucket.
2936 xbs.bucket = ocfs2_xattr_bucket_new(inode);
2937 if (!xbs.bucket) {
2938 mlog_errno(-ENOMEM);
2939 return -ENOMEM;
2942 ret = ocfs2_inode_lock(inode, &di_bh, 1);
2943 if (ret < 0) {
2944 mlog_errno(ret);
2945 goto cleanup_nolock;
2947 xis.inode_bh = xbs.inode_bh = di_bh;
2948 di = (struct ocfs2_dinode *)di_bh->b_data;
2950 down_write(&OCFS2_I(inode)->ip_xattr_sem);
2952 * Scan the inode and external block for an extended attribute
2953 * with the same name and collect the search information.
2955 ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
2956 if (ret)
2957 goto cleanup;
2958 if (xis.not_found) {
2959 ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
2960 if (ret)
2961 goto cleanup;
2964 if (xis.not_found && xbs.not_found) {
2965 ret = -ENODATA;
2966 if (flags & XATTR_REPLACE)
2967 goto cleanup;
2968 ret = 0;
2969 if (!value)
2970 goto cleanup;
2971 } else {
2972 ret = -EEXIST;
2973 if (flags & XATTR_CREATE)
2974 goto cleanup;
2977 /* Check whether the value is refcounted and do some preparation. */
2978 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
2979 (!xis.not_found || !xbs.not_found)) {
2980 ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
2981 &xis, &xbs, &ref_tree,
2982 &ref_meta, &ref_credits);
2983 if (ret) {
2984 mlog_errno(ret);
2985 goto cleanup;
2989 mutex_lock(&tl_inode->i_mutex);
2991 if (ocfs2_truncate_log_needs_flush(osb)) {
2992 ret = __ocfs2_flush_truncate_log(osb);
2993 if (ret < 0) {
2994 mutex_unlock(&tl_inode->i_mutex);
2995 mlog_errno(ret);
2996 goto cleanup;
2999 mutex_unlock(&tl_inode->i_mutex);
3001 ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis,
3002 &xbs, &ctxt, ref_meta, &credits);
3003 if (ret) {
3004 mlog_errno(ret);
3005 goto cleanup;
3008 /* We need to update the inode's ctime field, so add a credit for it. */
3009 credits += OCFS2_INODE_UPDATE_CREDITS;
3010 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
3011 if (IS_ERR(ctxt.handle)) {
3012 ret = PTR_ERR(ctxt.handle);
3013 mlog_errno(ret);
3014 goto cleanup;
3017 ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
3019 ocfs2_commit_trans(osb, ctxt.handle);
3021 if (ctxt.data_ac)
3022 ocfs2_free_alloc_context(ctxt.data_ac);
3023 if (ctxt.meta_ac)
3024 ocfs2_free_alloc_context(ctxt.meta_ac);
3025 if (ocfs2_dealloc_has_cluster(&ctxt.dealloc))
3026 ocfs2_schedule_truncate_log_flush(osb, 1);
3027 ocfs2_run_deallocs(osb, &ctxt.dealloc);
3029 cleanup:
3030 if (ref_tree)
3031 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3032 up_write(&OCFS2_I(inode)->ip_xattr_sem);
3033 if (!value && !ret) {
3034 ret = ocfs2_try_remove_refcount_tree(inode, di_bh);
3035 if (ret)
3036 mlog_errno(ret);
3038 ocfs2_inode_unlock(inode, 1);
3039 cleanup_nolock:
3040 brelse(di_bh);
3041 brelse(xbs.xattr_bh);
3042 ocfs2_xattr_bucket_free(xbs.bucket);
3044 return ret;
3048 * Find the xattr extent rec which may contain name_hash.
3049 * e_cpos will be the first name hash of the xattr rec.
3050 * el must be the ocfs2_xattr_block.xb_attrs.xb_root.xt_list.
3052 static int ocfs2_xattr_get_rec(struct inode *inode,
3053 u32 name_hash,
3054 u64 *p_blkno,
3055 u32 *e_cpos,
3056 u32 *num_clusters,
3057 struct ocfs2_extent_list *el)
3059 int ret = 0, i;
3060 struct buffer_head *eb_bh = NULL;
3061 struct ocfs2_extent_block *eb;
3062 struct ocfs2_extent_rec *rec = NULL;
3063 u64 e_blkno = 0;
3065 if (el->l_tree_depth) {
3066 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash,
3067 &eb_bh);
3068 if (ret) {
3069 mlog_errno(ret);
3070 goto out;
3073 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
3074 el = &eb->h_list;
3076 if (el->l_tree_depth) {
3077 ocfs2_error(inode->i_sb,
3078 "Inode %lu has non zero tree depth in "
3079 "xattr tree block %llu\n", inode->i_ino,
3080 (unsigned long long)eb_bh->b_blocknr);
3081 ret = -EROFS;
3082 goto out;
3086 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
3087 rec = &el->l_recs[i];
3089 if (le32_to_cpu(rec->e_cpos) <= name_hash) {
3090 e_blkno = le64_to_cpu(rec->e_blkno);
3091 break;
3095 if (!e_blkno) {
3096 ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
3097 "record (%u, %u, 0) in xattr", inode->i_ino,
3098 le32_to_cpu(rec->e_cpos),
3099 ocfs2_rec_clusters(el, rec));
3100 ret = -EROFS;
3101 goto out;
3104 *p_blkno = le64_to_cpu(rec->e_blkno);
3105 *num_clusters = le16_to_cpu(rec->e_leaf_clusters);
3106 if (e_cpos)
3107 *e_cpos = le32_to_cpu(rec->e_cpos);
3108 out:
3109 brelse(eb_bh);
3110 return ret;
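/*
 * Illustrative example (hypothetical numbers): with leaf records whose
 * e_cpos values are 0, 1000 and 5000, a lookup for name_hash 3000 walks
 * the records from the last one backwards and stops at e_cpos 1000, the
 * last record whose starting hash does not exceed the search hash, so
 * *p_blkno and *num_clusters describe that record's extent.
 */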
3113 typedef int (xattr_bucket_func)(struct inode *inode,
3114 struct ocfs2_xattr_bucket *bucket,
3115 void *para);
3117 static int ocfs2_find_xe_in_bucket(struct inode *inode,
3118 struct ocfs2_xattr_bucket *bucket,
3119 int name_index,
3120 const char *name,
3121 u32 name_hash,
3122 u16 *xe_index,
3123 int *found)
3125 int i, ret = 0, cmp = 1, block_off, new_offset;
3126 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
3127 size_t name_len = strlen(name);
3128 struct ocfs2_xattr_entry *xe = NULL;
3129 char *xe_name;
3132 * We don't use binary search in the bucket because there
3133 * may be multiple entries with the same name hash.
3135 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
3136 xe = &xh->xh_entries[i];
3138 if (name_hash > le32_to_cpu(xe->xe_name_hash))
3139 continue;
3140 else if (name_hash < le32_to_cpu(xe->xe_name_hash))
3141 break;
3143 cmp = name_index - ocfs2_xattr_get_type(xe);
3144 if (!cmp)
3145 cmp = name_len - xe->xe_name_len;
3146 if (cmp)
3147 continue;
3149 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
3152 &block_off,
3153 &new_offset);
3154 if (ret) {
3155 mlog_errno(ret);
3156 break;
3160 xe_name = bucket_block(bucket, block_off) + new_offset;
3161 if (!memcmp(name, xe_name, name_len)) {
3162 *xe_index = i;
3163 *found = 1;
3164 ret = 0;
3165 break;
3169 return ret;
3173 * Find the specified xattr entry in a series of buckets.
3174 * This series starts from p_blkno and spans num_clusters clusters.
3175 * The ocfs2_xattr_header.xh_num_buckets of the first bucket contains
3176 * the number of valid buckets.
3178 * Return, via xs, the bucket this xattr should reside in. If the xattr's
3179 * hash falls in the gap between 2 buckets, return the lower bucket.
3181 static int ocfs2_xattr_bucket_find(struct inode *inode,
3182 int name_index,
3183 const char *name,
3184 u32 name_hash,
3185 u64 p_blkno,
3186 u32 first_hash,
3187 u32 num_clusters,
3188 struct ocfs2_xattr_search *xs)
3190 int ret, found = 0;
3191 struct ocfs2_xattr_header *xh = NULL;
3192 struct ocfs2_xattr_entry *xe = NULL;
3193 u16 index = 0;
3194 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3195 int low_bucket = 0, bucket, high_bucket;
3196 struct ocfs2_xattr_bucket *search;
3197 u32 last_hash;
3198 u64 blkno, lower_blkno = 0;
3200 search = ocfs2_xattr_bucket_new(inode);
3201 if (!search) {
3202 ret = -ENOMEM;
3203 mlog_errno(ret);
3204 goto out;
3207 ret = ocfs2_read_xattr_bucket(search, p_blkno);
3208 if (ret) {
3209 mlog_errno(ret);
3210 goto out;
3213 xh = bucket_xh(search);
3214 high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1;
3215 while (low_bucket <= high_bucket) {
3216 ocfs2_xattr_bucket_relse(search);
3218 bucket = (low_bucket + high_bucket) / 2;
3219 blkno = p_blkno + bucket * blk_per_bucket;
3220 ret = ocfs2_read_xattr_bucket(search, blkno);
3221 if (ret) {
3222 mlog_errno(ret);
3223 goto out;
3226 xh = bucket_xh(search);
3227 xe = &xh->xh_entries[0];
3228 if (name_hash < le32_to_cpu(xe->xe_name_hash)) {
3229 high_bucket = bucket - 1;
3230 continue;
3234 * Check whether the hash of the last entry in our
3235 * bucket is larger than the search one. For an empty
3236 * bucket, the last one is also the first one.
3238 if (xh->xh_count)
3239 xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
3241 last_hash = le32_to_cpu(xe->xe_name_hash);
3243 /* record lower_blkno which may be the insert place. */
3244 lower_blkno = blkno;
3246 if (name_hash > le32_to_cpu(xe->xe_name_hash)) {
3247 low_bucket = bucket + 1;
3248 continue;
3251 /* The searched xattr should reside in this bucket if it exists. */
3252 ret = ocfs2_find_xe_in_bucket(inode, search,
3253 name_index, name, name_hash,
3254 &index, &found);
3255 if (ret) {
3256 mlog_errno(ret);
3257 goto out;
3259 break;
3263 * Record the bucket we have found.
3264 * When the xattr's hash value is in the gap of 2 buckets, we will
3265 * always set it to the previous bucket.
3267 if (!lower_blkno)
3268 lower_blkno = p_blkno;
3270 /* This should be in cache - we just read it during the search */
3271 ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno);
3272 if (ret) {
3273 mlog_errno(ret);
3274 goto out;
3277 xs->header = bucket_xh(xs->bucket);
3278 xs->base = bucket_block(xs->bucket, 0);
3279 xs->end = xs->base + inode->i_sb->s_blocksize;
3281 if (found) {
3282 xs->here = &xs->header->xh_entries[index];
3283 mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name,
3284 (unsigned long long)bucket_blkno(xs->bucket), index);
3285 } else
3286 ret = -ENODATA;
3288 out:
3289 ocfs2_xattr_bucket_free(search);
3290 return ret;
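/*
 * Illustrative example (hypothetical numbers): with buckets whose first
 * hashes are 100, 400 and 900, a search for hash 600 ends up in the
 * bucket starting at 400 -- the last bucket whose first hash does not
 * exceed the search hash.  If no matching entry is found there,
 * -ENODATA is returned but xs->bucket still points at that bucket,
 * which is exactly where an insert for hash 600 would go.
 */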
3293 static int ocfs2_xattr_index_block_find(struct inode *inode,
3294 struct buffer_head *root_bh,
3295 int name_index,
3296 const char *name,
3297 struct ocfs2_xattr_search *xs)
3299 int ret;
3300 struct ocfs2_xattr_block *xb =
3301 (struct ocfs2_xattr_block *)root_bh->b_data;
3302 struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
3303 struct ocfs2_extent_list *el = &xb_root->xt_list;
3304 u64 p_blkno = 0;
3305 u32 first_hash, num_clusters = 0;
3306 u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
3308 if (le16_to_cpu(el->l_next_free_rec) == 0)
3309 return -ENODATA;
3311 mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n",
3312 name, name_hash, name_index);
3314 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
3315 &num_clusters, el);
3316 if (ret) {
3317 mlog_errno(ret);
3318 goto out;
3321 BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);
3323 mlog(0, "find xattr extent rec %u clusters from %llu, the first hash "
3324 "in the rec is %u\n", num_clusters, (unsigned long long)p_blkno,
3325 first_hash);
3327 ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
3328 p_blkno, first_hash, num_clusters, xs);
3330 out:
3331 return ret;
3334 static int ocfs2_iterate_xattr_buckets(struct inode *inode,
3335 u64 blkno,
3336 u32 clusters,
3337 xattr_bucket_func *func,
3338 void *para)
3340 int i, ret = 0;
3341 u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
3342 u32 num_buckets = clusters * bpc;
3343 struct ocfs2_xattr_bucket *bucket;
3345 bucket = ocfs2_xattr_bucket_new(inode);
3346 if (!bucket) {
3347 mlog_errno(-ENOMEM);
3348 return -ENOMEM;
3351 mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n",
3352 clusters, (unsigned long long)blkno);
3354 for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
3355 ret = ocfs2_read_xattr_bucket(bucket, blkno);
3356 if (ret) {
3357 mlog_errno(ret);
3358 break;
3362 * The real bucket num in this series of blocks is stored
3363 * in the 1st bucket.
3365 if (i == 0)
3366 num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);
3368 mlog(0, "iterating xattr bucket %llu, first hash %u\n",
3369 (unsigned long long)blkno,
3370 le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
3371 if (func) {
3372 ret = func(inode, bucket, para);
3373 if (ret && ret != -ERANGE)
3374 mlog_errno(ret);
3375 /* Fall through to bucket_relse() */
3378 ocfs2_xattr_bucket_relse(bucket);
3379 if (ret)
3380 break;
3383 ocfs2_xattr_bucket_free(bucket);
3384 return ret;
3387 struct ocfs2_xattr_tree_list {
3388 char *buffer;
3389 size_t buffer_size;
3390 size_t result;
3393 static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
3394 struct ocfs2_xattr_header *xh,
3395 int index,
3396 int *block_off,
3397 int *new_offset)
3399 u16 name_offset;
3401 if (index < 0 || index >= le16_to_cpu(xh->xh_count))
3402 return -EINVAL;
3404 name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);
3406 *block_off = name_offset >> sb->s_blocksize_bits;
3407 *new_offset = name_offset % sb->s_blocksize;
3409 return 0;
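/*
 * Illustrative example (assuming a 4096-byte block size): a stored
 * xe_name_offset of 5000 is split into block_off = 5000 >> 12 = 1 and
 * new_offset = 5000 % 4096 = 904, i.e. the name starts 904 bytes into
 * the second block of the bucket.
 */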
3412 static int ocfs2_list_xattr_bucket(struct inode *inode,
3413 struct ocfs2_xattr_bucket *bucket,
3414 void *para)
3416 int ret = 0, type;
3417 struct ocfs2_xattr_tree_list *xl = (struct ocfs2_xattr_tree_list *)para;
3418 int i, block_off, new_offset;
3419 const char *prefix, *name;
3421 for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) {
3422 struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i];
3423 type = ocfs2_xattr_get_type(entry);
3424 prefix = ocfs2_xattr_prefix(type);
3426 if (prefix) {
3427 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
3428 bucket_xh(bucket),
3430 &block_off,
3431 &new_offset);
3432 if (ret)
3433 break;
3435 name = (const char *)bucket_block(bucket, block_off) +
3436 new_offset;
3437 ret = ocfs2_xattr_list_entry(xl->buffer,
3438 xl->buffer_size,
3439 &xl->result,
3440 prefix, name,
3441 entry->xe_name_len);
3442 if (ret)
3443 break;
3447 return ret;
3450 static int ocfs2_iterate_xattr_index_block(struct inode *inode,
3451 struct buffer_head *blk_bh,
3452 xattr_tree_rec_func *rec_func,
3453 void *para)
3455 struct ocfs2_xattr_block *xb =
3456 (struct ocfs2_xattr_block *)blk_bh->b_data;
3457 struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list;
3458 int ret = 0;
3459 u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0;
3460 u64 p_blkno = 0;
3462 if (!el->l_next_free_rec || !rec_func)
3463 return 0;
3465 while (name_hash > 0) {
3466 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno,
3467 &e_cpos, &num_clusters, el);
3468 if (ret) {
3469 mlog_errno(ret);
3470 break;
3473 ret = rec_func(inode, blk_bh, p_blkno, e_cpos,
3474 num_clusters, para);
3475 if (ret) {
3476 if (ret != -ERANGE)
3477 mlog_errno(ret);
3478 break;
3481 if (e_cpos == 0)
3482 break;
3484 name_hash = e_cpos - 1;
3487 return ret;
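/*
 * The walk above visits the extent records from the highest name hash
 * downwards: name_hash starts at UINT_MAX, each ocfs2_xattr_get_rec()
 * call returns the record covering that hash, and the next probe uses
 * e_cpos - 1 so it lands in the record just below.  The loop stops at
 * the record whose e_cpos is 0, which is always the lowest one.
 */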
3491 static int ocfs2_list_xattr_tree_rec(struct inode *inode,
3492 struct buffer_head *root_bh,
3493 u64 blkno, u32 cpos, u32 len, void *para)
3495 return ocfs2_iterate_xattr_buckets(inode, blkno, len,
3496 ocfs2_list_xattr_bucket, para);
3499 static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
3500 struct buffer_head *blk_bh,
3501 char *buffer,
3502 size_t buffer_size)
3504 int ret;
3505 struct ocfs2_xattr_tree_list xl = {
3506 .buffer = buffer,
3507 .buffer_size = buffer_size,
3508 .result = 0,
3511 ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
3512 ocfs2_list_xattr_tree_rec, &xl);
3513 if (ret) {
3514 mlog_errno(ret);
3515 goto out;
3518 ret = xl.result;
3519 out:
3520 return ret;
3523 static int cmp_xe(const void *a, const void *b)
3525 const struct ocfs2_xattr_entry *l = a, *r = b;
3526 u32 l_hash = le32_to_cpu(l->xe_name_hash);
3527 u32 r_hash = le32_to_cpu(r->xe_name_hash);
3529 if (l_hash > r_hash)
3530 return 1;
3531 if (l_hash < r_hash)
3532 return -1;
3533 return 0;
3536 static void swap_xe(void *a, void *b, int size)
3538 struct ocfs2_xattr_entry *l = a, *r = b, tmp;
3540 tmp = *l;
3541 memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
3542 memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
3546 * When the ocfs2_xattr_block is filled up, a new bucket will be created
3547 * and all the xattr entries will be moved to the new bucket.
3548 * The header goes at the start of the bucket, and the names+values are
3549 * filled from the end. This is why *target starts as the last buffer.
3550 * Note: we need to sort the entries since they are not saved in order
3551 * in the ocfs2_xattr_block.
3553 static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
3554 struct buffer_head *xb_bh,
3555 struct ocfs2_xattr_bucket *bucket)
3557 int i, blocksize = inode->i_sb->s_blocksize;
3558 int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3559 u16 offset, size, off_change;
3560 struct ocfs2_xattr_entry *xe;
3561 struct ocfs2_xattr_block *xb =
3562 (struct ocfs2_xattr_block *)xb_bh->b_data;
3563 struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header;
3564 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
3565 u16 count = le16_to_cpu(xb_xh->xh_count);
3566 char *src = xb_bh->b_data;
3567 char *target = bucket_block(bucket, blks - 1);
3569 mlog(0, "cp xattr from block %llu to bucket %llu\n",
3570 (unsigned long long)xb_bh->b_blocknr,
3571 (unsigned long long)bucket_blkno(bucket));
3573 for (i = 0; i < blks; i++)
3574 memset(bucket_block(bucket, i), 0, blocksize);
3577 * Since the xe_name_offset is based on the ocfs2_xattr_header,
3578 * there is an offset change corresponding to the change of the
3579 * ocfs2_xattr_header's position.
3581 off_change = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
3582 xe = &xb_xh->xh_entries[count - 1];
3583 offset = le16_to_cpu(xe->xe_name_offset) + off_change;
3584 size = blocksize - offset;
3586 /* copy all the names and values. */
3587 memcpy(target + offset, src + offset, size);
3589 /* Init new header now. */
3590 xh->xh_count = xb_xh->xh_count;
3591 xh->xh_num_buckets = cpu_to_le16(1);
3592 xh->xh_name_value_len = cpu_to_le16(size);
3593 xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size);
3595 /* copy all the entries. */
3596 target = bucket_block(bucket, 0);
3597 offset = offsetof(struct ocfs2_xattr_header, xh_entries);
3598 size = count * sizeof(struct ocfs2_xattr_entry);
3599 memcpy(target + offset, (char *)xb_xh + offset, size);
3601 /* Change the xe offset for all the xe because of the move. */
3602 off_change = OCFS2_XATTR_BUCKET_SIZE - blocksize +
3603 offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
3604 for (i = 0; i < count; i++)
3605 le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);
3607 mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n",
3608 offset, size, off_change);
3610 sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
3611 cmp_xe, swap_xe);
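/*
 * Offset bookkeeping for the copy above: inside an xattr block,
 * xe_name_offset is relative to the embedded ocfs2_xattr_header, while
 * inside a bucket it is relative to the start of the bucket.  Because
 * the names and values land in the *last* block of the bucket, every
 * offset grows by OCFS2_XATTR_BUCKET_SIZE - blocksize plus the header's
 * old offset within the block, which is exactly the off_change applied
 * to each entry before the final sort by hash.
 */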
3615 * After we move xattrs from the block to the index b-tree, we have to
3616 * update ocfs2_xattr_search to the new xe and base.
3618 * When the entry is in an xattr block, xattr_bh indicates the storage place.
3619 * If the entry is in the index b-tree, "bucket" indicates the
3620 * real place of the xattr.
3622 static void ocfs2_xattr_update_xattr_search(struct inode *inode,
3623 struct ocfs2_xattr_search *xs,
3624 struct buffer_head *old_bh)
3626 char *buf = old_bh->b_data;
3627 struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf;
3628 struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header;
3629 int i;
3631 xs->header = bucket_xh(xs->bucket);
3632 xs->base = bucket_block(xs->bucket, 0);
3633 xs->end = xs->base + inode->i_sb->s_blocksize;
3635 if (xs->not_found)
3636 return;
3638 i = xs->here - old_xh->xh_entries;
3639 xs->here = &xs->header->xh_entries[i];
3642 static int ocfs2_xattr_create_index_block(struct inode *inode,
3643 struct ocfs2_xattr_search *xs,
3644 struct ocfs2_xattr_set_ctxt *ctxt)
3646 int ret;
3647 u32 bit_off, len;
3648 u64 blkno;
3649 handle_t *handle = ctxt->handle;
3650 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3651 struct ocfs2_inode_info *oi = OCFS2_I(inode);
3652 struct buffer_head *xb_bh = xs->xattr_bh;
3653 struct ocfs2_xattr_block *xb =
3654 (struct ocfs2_xattr_block *)xb_bh->b_data;
3655 struct ocfs2_xattr_tree_root *xr;
3656 u16 xb_flags = le16_to_cpu(xb->xb_flags);
3658 mlog(0, "create xattr index block for %llu\n",
3659 (unsigned long long)xb_bh->b_blocknr);
3661 BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
3662 BUG_ON(!xs->bucket);
3665 * XXX:
3666 * We can use this lock for now, and maybe move to a dedicated mutex
3667 * if performance becomes a problem later.
3669 down_write(&oi->ip_alloc_sem);
3671 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh,
3672 OCFS2_JOURNAL_ACCESS_WRITE);
3673 if (ret) {
3674 mlog_errno(ret);
3675 goto out;
3678 ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac,
3679 1, 1, &bit_off, &len);
3680 if (ret) {
3681 mlog_errno(ret);
3682 goto out;
3686 * The bucket may span many blocks, and
3687 * we will only touch the 1st block and the last block
3688 * in the whole bucket (one for the entry and one for the data).
3690 blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
3692 mlog(0, "allocate 1 cluster from %llu to xattr block\n",
3693 (unsigned long long)blkno);
3695 ret = ocfs2_init_xattr_bucket(xs->bucket, blkno);
3696 if (ret) {
3697 mlog_errno(ret);
3698 goto out;
3701 ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
3702 OCFS2_JOURNAL_ACCESS_CREATE);
3703 if (ret) {
3704 mlog_errno(ret);
3705 goto out;
3708 ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket);
3709 ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
3711 ocfs2_xattr_update_xattr_search(inode, xs, xb_bh);
3713 /* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */
3714 memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize -
3715 offsetof(struct ocfs2_xattr_block, xb_attrs));
3717 xr = &xb->xb_attrs.xb_root;
3718 xr->xt_clusters = cpu_to_le32(1);
3719 xr->xt_last_eb_blk = 0;
3720 xr->xt_list.l_tree_depth = 0;
3721 xr->xt_list.l_count = cpu_to_le16(ocfs2_xattr_recs_per_xb(inode->i_sb));
3722 xr->xt_list.l_next_free_rec = cpu_to_le16(1);
3724 xr->xt_list.l_recs[0].e_cpos = 0;
3725 xr->xt_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
3726 xr->xt_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
3728 xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED);
3730 ocfs2_journal_dirty(handle, xb_bh);
3732 out:
3733 up_write(&oi->ip_alloc_sem);
3735 return ret;
3738 static int cmp_xe_offset(const void *a, const void *b)
3740 const struct ocfs2_xattr_entry *l = a, *r = b;
3741 u32 l_name_offset = le16_to_cpu(l->xe_name_offset);
3742 u32 r_name_offset = le16_to_cpu(r->xe_name_offset);
3744 if (l_name_offset < r_name_offset)
3745 return 1;
3746 if (l_name_offset > r_name_offset)
3747 return -1;
3748 return 0;
3752 * Defragment an xattr bucket if we find that the bucket has some
3753 * holes between name/value pairs.
3754 * We will move all the name/value pairs to the end of the bucket
3755 * so that we can free up some space for insertion.
3757 static int ocfs2_defrag_xattr_bucket(struct inode *inode,
3758 handle_t *handle,
3759 struct ocfs2_xattr_bucket *bucket)
3761 int ret, i;
3762 size_t end, offset, len, value_len;
3763 struct ocfs2_xattr_header *xh;
3764 char *entries, *buf, *bucket_buf = NULL;
3765 u64 blkno = bucket_blkno(bucket);
3766 u16 xh_free_start;
3767 size_t blocksize = inode->i_sb->s_blocksize;
3768 struct ocfs2_xattr_entry *xe;
3771 * In order to make the operation more efficient and generic,
3772 * we copy all the blocks into a contiguous buffer and do the
3773 * defragmentation there, so if anything goes wrong, we will not
3774 * touch the real blocks.
3776 bucket_buf = kmalloc(OCFS2_XATTR_BUCKET_SIZE, GFP_NOFS);
3777 if (!bucket_buf) {
3778 ret = -EIO;
3779 goto out;
3782 buf = bucket_buf;
3783 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
3784 memcpy(buf, bucket_block(bucket, i), blocksize);
3786 ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
3787 OCFS2_JOURNAL_ACCESS_WRITE);
3788 if (ret < 0) {
3789 mlog_errno(ret);
3790 goto out;
3793 xh = (struct ocfs2_xattr_header *)bucket_buf;
3794 entries = (char *)xh->xh_entries;
3795 xh_free_start = le16_to_cpu(xh->xh_free_start);
3797 mlog(0, "adjust xattr bucket in %llu, count = %u, "
3798 "xh_free_start = %u, xh_name_value_len = %u.\n",
3799 (unsigned long long)blkno, le16_to_cpu(xh->xh_count),
3800 xh_free_start, le16_to_cpu(xh->xh_name_value_len));
3803 * sort all the entries by their offset.
3804 * the largest will be the first, so that we can
3805 * move them to the end one by one.
3807 sort(entries, le16_to_cpu(xh->xh_count),
3808 sizeof(struct ocfs2_xattr_entry),
3809 cmp_xe_offset, swap_xe);
3811 /* Move all name/values to the end of the bucket. */
3812 xe = xh->xh_entries;
3813 end = OCFS2_XATTR_BUCKET_SIZE;
3814 for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
3815 offset = le16_to_cpu(xe->xe_name_offset);
3816 if (ocfs2_xattr_is_local(xe))
3817 value_len = OCFS2_XATTR_SIZE(
3818 le64_to_cpu(xe->xe_value_size));
3819 else
3820 value_len = OCFS2_XATTR_ROOT_SIZE;
3821 len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len;
3824 * We must make sure that the name/value pair
3825 * exists in the same block. So adjust end to
3826 * the previous block end if needed.
3828 if (((end - len) / blocksize !=
3829 (end - 1) / blocksize))
3830 end = end - end % blocksize;
3832 if (end > offset + len) {
3833 memmove(bucket_buf + end - len,
3834 bucket_buf + offset, len);
3835 xe->xe_name_offset = cpu_to_le16(end - len);
3838 mlog_bug_on_msg(end < offset + len, "Defrag check failed for "
3839 "bucket %llu\n", (unsigned long long)blkno);
3841 end -= len;
3844 mlog_bug_on_msg(xh_free_start > end, "Defrag check failed for "
3845 "bucket %llu\n", (unsigned long long)blkno);
3847 if (xh_free_start == end)
3848 goto out;
3850 memset(bucket_buf + xh_free_start, 0, end - xh_free_start);
3851 xh->xh_free_start = cpu_to_le16(end);
3853 /* sort the entries by their name_hash. */
3854 sort(entries, le16_to_cpu(xh->xh_count),
3855 sizeof(struct ocfs2_xattr_entry),
3856 cmp_xe, swap_xe);
3858 buf = bucket_buf;
3859 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
3860 memcpy(bucket_block(bucket, i), buf, blocksize);
3861 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
3863 out:
3864 kfree(bucket_buf);
3865 return ret;
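/*
 * Net effect of the defragmentation above: working on a private copy of
 * the bucket, entries are sorted by descending name offset, each
 * name/value blob is slid toward the end of the bucket (never straddling
 * a block boundary), xh_free_start is raised to the new packed edge, and
 * the entries are re-sorted by name hash before the copy is written back.
 */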
3869 * prev_blkno points to the start of an existing extent. new_blkno
3870 * points to a newly allocated extent. Because we know each of our
3871 * clusters contains more than one bucket, we can easily split one cluster
3872 * at a bucket boundary. So we take the last cluster of the existing
3873 * extent and split it down the middle. We move the last half of the
3874 * buckets in the last cluster of the existing extent over to the new
3875 * extent.
3877 * first_bh is the buffer at prev_blkno so we can update the existing
3878 * extent's bucket count. header_bh is the bucket where we were hoping
3879 * to insert our xattr. If the bucket move places the target in the new
3880 * extent, we'll update first_bh and header_bh after modifying the old
3881 * extent.
3883 * first_hash will be set as the 1st xe's name_hash in the new extent.
3885 static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
3886 handle_t *handle,
3887 struct ocfs2_xattr_bucket *first,
3888 struct ocfs2_xattr_bucket *target,
3889 u64 new_blkno,
3890 u32 num_clusters,
3891 u32 *first_hash)
3893 int ret;
3894 struct super_block *sb = inode->i_sb;
3895 int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(sb);
3896 int num_buckets = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
3897 int to_move = num_buckets / 2;
3898 u64 src_blkno;
3899 u64 last_cluster_blkno = bucket_blkno(first) +
3900 ((num_clusters - 1) * ocfs2_clusters_to_blocks(sb, 1));
3902 BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
3903 BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);
3905 mlog(0, "move half of xattrs in cluster %llu to %llu\n",
3906 (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno);
3908 ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
3909 last_cluster_blkno, new_blkno,
3910 to_move, first_hash);
3911 if (ret) {
3912 mlog_errno(ret);
3913 goto out;
3916 /* This is the first bucket that got moved */
3917 src_blkno = last_cluster_blkno + (to_move * blks_per_bucket);
3920 * If the target bucket was part of the moved buckets, we need to
3921 * update first and target.
3923 if (bucket_blkno(target) >= src_blkno) {
3924 /* Find the block for the new target bucket */
3925 src_blkno = new_blkno +
3926 (bucket_blkno(target) - src_blkno);
3928 ocfs2_xattr_bucket_relse(first);
3929 ocfs2_xattr_bucket_relse(target);
3932 * These shouldn't fail - the buffers are in the
3933 * journal from ocfs2_cp_xattr_bucket().
3935 ret = ocfs2_read_xattr_bucket(first, new_blkno);
3936 if (ret) {
3937 mlog_errno(ret);
3938 goto out;
3940 ret = ocfs2_read_xattr_bucket(target, src_blkno);
3941 if (ret)
3942 mlog_errno(ret);
3946 out:
3947 return ret;
3951 * Find the suitable pos when we divide a bucket into 2.
3952 * We have to make sure the xattrs with the same hash value exist
3953 * in the same bucket.
3955 * If this ocfs2_xattr_header covers more than one hash value, find a
3956 * place where the hash value changes. Try to find the most even split.
3957 * The most common case is that all entries have different hash values,
3958 * and the first check we make will find a place to split.
3960 static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
3962 struct ocfs2_xattr_entry *entries = xh->xh_entries;
3963 int count = le16_to_cpu(xh->xh_count);
3964 int delta, middle = count / 2;
3967 * We start at the middle. Each step gets farther away in both
3968 * directions. We therefore hit the change in hash value
3969 * nearest to the middle. Note that this loop does not execute for
3970 * count < 2.
3972 for (delta = 0; delta < middle; delta++) {
3973 /* Let's check delta earlier than middle */
3974 if (cmp_xe(&entries[middle - delta - 1],
3975 &entries[middle - delta]))
3976 return middle - delta;
3978 /* For even counts, don't walk off the end */
3979 if ((middle + delta + 1) == count)
3980 continue;
3982 /* Now try delta past middle */
3983 if (cmp_xe(&entries[middle + delta],
3984 &entries[middle + delta + 1]))
3985 return middle + delta + 1;
3988 /* Every entry had the same hash */
3989 return count;
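/*
 * Illustrative example (hypothetical hashes): for entries with hashes
 * {10, 10, 20, 20, 30}, count = 5 and middle = 2.  At delta = 0 the
 * pair (entries[1], entries[2]) already differs (10 vs 20), so the
 * function returns 2: entries 0-1 stay in the old bucket and entries
 * 2-4 move, keeping equal hashes together.  If every hash were equal,
 * count itself is returned and the new bucket starts out empty.
 */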
3993 * Move some xattrs in the old bucket (blk) to the new bucket (new_blk).
3994 * first_hash will record the 1st hash of the new bucket.
3996 * Normally half of the xattrs will be moved. But we have to make
3997 * sure that the xattrs with the same hash value are stored in the
3998 * same bucket. If all the xattrs in this bucket have the same hash
3999 * value, the new bucket will be initialized as an empty one and the
4000 * first_hash will be initialized as (hash_value+1).
4002 static int ocfs2_divide_xattr_bucket(struct inode *inode,
4003 handle_t *handle,
4004 u64 blk,
4005 u64 new_blk,
4006 u32 *first_hash,
4007 int new_bucket_head)
4009 int ret, i;
4010 int count, start, len, name_value_len = 0, xe_len, name_offset = 0;
4011 struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
4012 struct ocfs2_xattr_header *xh;
4013 struct ocfs2_xattr_entry *xe;
4014 int blocksize = inode->i_sb->s_blocksize;
4016 mlog(0, "move some of xattrs from bucket %llu to %llu\n",
4017 (unsigned long long)blk, (unsigned long long)new_blk);
4019 s_bucket = ocfs2_xattr_bucket_new(inode);
4020 t_bucket = ocfs2_xattr_bucket_new(inode);
4021 if (!s_bucket || !t_bucket) {
4022 ret = -ENOMEM;
4023 mlog_errno(ret);
4024 goto out;
4027 ret = ocfs2_read_xattr_bucket(s_bucket, blk);
4028 if (ret) {
4029 mlog_errno(ret);
4030 goto out;
4033 ret = ocfs2_xattr_bucket_journal_access(handle, s_bucket,
4034 OCFS2_JOURNAL_ACCESS_WRITE);
4035 if (ret) {
4036 mlog_errno(ret);
4037 goto out;
4041 * Even if !new_bucket_head, we're overwriting t_bucket. Thus,
4042 * there's no need to read it.
4044 ret = ocfs2_init_xattr_bucket(t_bucket, new_blk);
4045 if (ret) {
4046 mlog_errno(ret);
4047 goto out;
4051 * Hey, if we're overwriting t_bucket, what difference does
4052 * ACCESS_CREATE vs ACCESS_WRITE make? See the comment in the
4053 * same part of ocfs2_cp_xattr_bucket().
4055 ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
4056 new_bucket_head ?
4057 OCFS2_JOURNAL_ACCESS_CREATE :
4058 OCFS2_JOURNAL_ACCESS_WRITE);
4059 if (ret) {
4060 mlog_errno(ret);
4061 goto out;
4064 xh = bucket_xh(s_bucket);
4065 count = le16_to_cpu(xh->xh_count);
4066 start = ocfs2_xattr_find_divide_pos(xh);
4068 if (start == count) {
4069 xe = &xh->xh_entries[start-1];
4072 * Initialize a new empty bucket here.
4073 * The hash value is set as one larger than
4074 * that of the last entry in the previous bucket.
4076 for (i = 0; i < t_bucket->bu_blocks; i++)
4077 memset(bucket_block(t_bucket, i), 0, blocksize);
4079 xh = bucket_xh(t_bucket);
4080 xh->xh_free_start = cpu_to_le16(blocksize);
4081 xh->xh_entries[0].xe_name_hash = xe->xe_name_hash;
4082 le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1);
4084 goto set_num_buckets;
4087 /* copy the whole bucket to the new first. */
4088 ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
4090 /* update the new bucket. */
4091 xh = bucket_xh(t_bucket);
4094 * Calculate the total name/value len and xh_free_start for
4095 * the old bucket first.
4097 name_offset = OCFS2_XATTR_BUCKET_SIZE;
4098 name_value_len = 0;
4099 for (i = 0; i < start; i++) {
4100 xe = &xh->xh_entries[i];
4101 xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
4102 if (ocfs2_xattr_is_local(xe))
4103 xe_len +=
4104 OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
4105 else
4106 xe_len += OCFS2_XATTR_ROOT_SIZE;
4107 name_value_len += xe_len;
4108 if (le16_to_cpu(xe->xe_name_offset) < name_offset)
4109 name_offset = le16_to_cpu(xe->xe_name_offset);
4113 * Now begin the modification of the new bucket.
4115 * In the new bucket, we just move the xattr entries to the beginning
4116 * and don't touch the names/values. So there will be some holes in the
4117 * bucket, and they will be removed when ocfs2_defrag_xattr_bucket is
4118 * called.
4120 xe = &xh->xh_entries[start];
4121 len = sizeof(struct ocfs2_xattr_entry) * (count - start);
4122 mlog(0, "mv xattr entry len %d from %d to %d\n", len,
4123 (int)((char *)xe - (char *)xh),
4124 (int)((char *)xh->xh_entries - (char *)xh));
4125 memmove((char *)xh->xh_entries, (char *)xe, len);
4126 xe = &xh->xh_entries[count - start];
4127 len = sizeof(struct ocfs2_xattr_entry) * start;
4128 memset((char *)xe, 0, len);
4130 le16_add_cpu(&xh->xh_count, -start);
4131 le16_add_cpu(&xh->xh_name_value_len, -name_value_len);
4133 /* Calculate xh_free_start for the new bucket. */
4134 xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
4135 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
4136 xe = &xh->xh_entries[i];
4137 xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
4138 if (ocfs2_xattr_is_local(xe))
4139 xe_len +=
4140 OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
4141 else
4142 xe_len += OCFS2_XATTR_ROOT_SIZE;
4143 if (le16_to_cpu(xe->xe_name_offset) <
4144 le16_to_cpu(xh->xh_free_start))
4145 xh->xh_free_start = xe->xe_name_offset;
4148 set_num_buckets:
4149 /* set xh->xh_num_buckets for the new xh. */
4150 if (new_bucket_head)
4151 xh->xh_num_buckets = cpu_to_le16(1);
4152 else
4153 xh->xh_num_buckets = 0;
4155 ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);
4157 /* store the first_hash of the new bucket. */
4158 if (first_hash)
4159 *first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash);
4162 * Now only update the 1st block of the old bucket. If we
4163 * just added a new empty bucket, there is no need to modify
4164 * it.
4166 if (start == count)
4167 goto out;
4169 xh = bucket_xh(s_bucket);
4170 memset(&xh->xh_entries[start], 0,
4171 sizeof(struct ocfs2_xattr_entry) * (count - start));
4172 xh->xh_count = cpu_to_le16(start);
4173 xh->xh_free_start = cpu_to_le16(name_offset);
4174 xh->xh_name_value_len = cpu_to_le16(name_value_len);
4176 ocfs2_xattr_bucket_journal_dirty(handle, s_bucket);
4178 out:
4179 ocfs2_xattr_bucket_free(s_bucket);
4180 ocfs2_xattr_bucket_free(t_bucket);
4182 return ret;
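/*
 * Illustrative example (hypothetical numbers): with count = 6 entries
 * and a divide position of start = 3, entries 3-5 end up at the head of
 * the new bucket (their name/value blobs keep their old offsets until a
 * later defrag), the old bucket is trimmed to xh_count = 3, and
 * *first_hash is the hash of what used to be entry 3.
 */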
4186 * Copy xattr from one bucket to another bucket.
4188 * The caller must make sure that the journal transaction
4189 * has enough space for journaling.
4191 static int ocfs2_cp_xattr_bucket(struct inode *inode,
4192 handle_t *handle,
4193 u64 s_blkno,
4194 u64 t_blkno,
4195 int t_is_new)
4197 int ret;
4198 struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
4200 BUG_ON(s_blkno == t_blkno);
4202 mlog(0, "cp bucket %llu to %llu, target is %d\n",
4203 (unsigned long long)s_blkno, (unsigned long long)t_blkno,
4204 t_is_new);
4206 s_bucket = ocfs2_xattr_bucket_new(inode);
4207 t_bucket = ocfs2_xattr_bucket_new(inode);
4208 if (!s_bucket || !t_bucket) {
4209 ret = -ENOMEM;
4210 mlog_errno(ret);
4211 goto out;
4214 ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno);
4215 if (ret)
4216 goto out;
4219 * Even if !t_is_new, we're overwriting t_bucket. Thus,
4220 * there's no need to read it.
4222 ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno);
4223 if (ret)
4224 goto out;
4227 * Hey, if we're overwriting t_bucket, what difference does
4228 * ACCESS_CREATE vs ACCESS_WRITE make? Well, if we allocated a new
4229 * cluster to fill, we came here from
4230 * ocfs2_mv_xattr_buckets(), and it is really new -
4231 * ACCESS_CREATE is required. But we also might have moved data
4232 * out of t_bucket before extending back into it.
4233 * ocfs2_add_new_xattr_bucket() can do this - its call to
4234 * ocfs2_add_new_xattr_cluster() may have created a new extent
4235 * and copied out the end of the old extent. Then it re-extends
4236 * the old extent back to create space for new xattrs. That's
4237 * how we get here, and the bucket isn't really new.
4239 ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
4240 t_is_new ?
4241 OCFS2_JOURNAL_ACCESS_CREATE :
4242 OCFS2_JOURNAL_ACCESS_WRITE);
4243 if (ret)
4244 goto out;
4246 ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
4247 ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);
4249 out:
4250 ocfs2_xattr_bucket_free(t_bucket);
4251 ocfs2_xattr_bucket_free(s_bucket);
4253 return ret;
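/*
 * Sizing note (assuming the usual 4K OCFS2_XATTR_BUCKET_SIZE): a bucket
 * always covers 4K of disk built from blocksize-sized buffer_heads, so
 * bu_blocks = 4096 / blocksize.  A copy like the one above therefore
 * touches a single bh on a 4K-block filesystem, 4 bhs with 1K blocks,
 * and at most OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET bhs with 512-byte
 * blocks.
 */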
4257 * src_blk points to the start of an existing extent. last_blk points to
4258 * the last cluster in that extent. to_blk points to a newly allocated
4259 * extent. We copy the buckets from the cluster at last_blk to the new
4260 * extent. If start_bucket is non-zero, we skip that many buckets before
4261 * we start copying. The new extent's xh_num_buckets gets set to the
4262 * number of buckets we copied. The old extent's xh_num_buckets shrinks
4263 * by the same amount.
4265 static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
4266 u64 src_blk, u64 last_blk, u64 to_blk,
4267 unsigned int start_bucket,
4268 u32 *first_hash)
4270 int i, ret, credits;
4271 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
4272 int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
4273 int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
4274 struct ocfs2_xattr_bucket *old_first, *new_first;
4276 mlog(0, "mv xattrs from cluster %llu to %llu\n",
4277 (unsigned long long)last_blk, (unsigned long long)to_blk);
4279 BUG_ON(start_bucket >= num_buckets);
4280 if (start_bucket) {
4281 num_buckets -= start_bucket;
4282 last_blk += (start_bucket * blks_per_bucket);
4285 /* The first bucket of the original extent */
4286 old_first = ocfs2_xattr_bucket_new(inode);
4287 /* The first bucket of the new extent */
4288 new_first = ocfs2_xattr_bucket_new(inode);
4289 if (!old_first || !new_first) {
4290 ret = -ENOMEM;
4291 mlog_errno(ret);
4292 goto out;
4295 ret = ocfs2_read_xattr_bucket(old_first, src_blk);
4296 if (ret) {
4297 mlog_errno(ret);
4298 goto out;
4302 * We need to update the first bucket of the old extent and all
4303 * the buckets going to the new extent.
4305 credits = ((num_buckets + 1) * blks_per_bucket) +
4306 handle->h_buffer_credits;
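/*
 * Worked example of the estimate above (made-up geometry: 1K blocks,
 * 32K clusters): blks_per_bucket = 4 and a cluster holds 8 buckets, so
 * moving a whole cluster asks for (8 + 1) * 4 = 36 block credits on top
 * of what the handle already holds -- the 8 buckets being copied plus
 * the first bucket of the old extent whose header is updated below.
 */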
4307 ret = ocfs2_extend_trans(handle, credits);
4308 if (ret) {
4309 mlog_errno(ret);
4310 goto out;
4313 ret = ocfs2_xattr_bucket_journal_access(handle, old_first,
4314 OCFS2_JOURNAL_ACCESS_WRITE);
4315 if (ret) {
4316 mlog_errno(ret);
4317 goto out;
4320 for (i = 0; i < num_buckets; i++) {
4321 ret = ocfs2_cp_xattr_bucket(inode, handle,
4322 last_blk + (i * blks_per_bucket),
4323 to_blk + (i * blks_per_bucket),
4325 if (ret) {
4326 mlog_errno(ret);
4327 goto out;
4332 * Get the new bucket ready before we dirty anything
4333 * (This actually shouldn't fail, because we already dirtied
4334 * it once in ocfs2_cp_xattr_bucket()).
4336 ret = ocfs2_read_xattr_bucket(new_first, to_blk);
4337 if (ret) {
4338 mlog_errno(ret);
4339 goto out;
4341 ret = ocfs2_xattr_bucket_journal_access(handle, new_first,
4342 OCFS2_JOURNAL_ACCESS_WRITE);
4343 if (ret) {
4344 mlog_errno(ret);
4345 goto out;
4348 /* Now update the headers */
4349 le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -num_buckets);
4350 ocfs2_xattr_bucket_journal_dirty(handle, old_first);
4352 bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(num_buckets);
4353 ocfs2_xattr_bucket_journal_dirty(handle, new_first);
4355 if (first_hash)
4356 *first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash);
4358 out:
4359 ocfs2_xattr_bucket_free(new_first);
4360 ocfs2_xattr_bucket_free(old_first);
4361 return ret;
4365 * Move some xattrs in this cluster to the new cluster.
4366 * This function should only be called when bucket size == cluster size.
4367 * Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead.
4369 static int ocfs2_divide_xattr_cluster(struct inode *inode,
4370 handle_t *handle,
4371 u64 prev_blk,
4372 u64 new_blk,
4373 u32 *first_hash)
4375 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
4376 int ret, credits = 2 * blk_per_bucket + handle->h_buffer_credits;
4378 BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);
4380 ret = ocfs2_extend_trans(handle, credits);
4381 if (ret) {
4382 mlog_errno(ret);
4383 return ret;
4386 /* Move half of the xattr in start_blk to the next bucket. */
4387 return ocfs2_divide_xattr_bucket(inode, handle, prev_blk,
4388 new_blk, first_hash, 1);
4392 * Move some xattrs from the old cluster to the new one since they are not
4393 * contiguous in ocfs2 xattr tree.
4395 * new_blk starts a new separate cluster, and we will move some xattrs from
4396 * prev_blk to it. v_start will be set as the first name hash value in this
4397 * new cluster so that it can be used as e_cpos during tree insertion and
4398 * don't collide with our original b-tree operations. first_bh and header_bh
4399 * will also be updated since they will be used in ocfs2_extend_xattr_bucket
4400 * to extend the insert bucket.
4402 * The problem is how much xattr should we move to the new one and when should
4403 * we update first_bh and header_bh?
4404 * 1. If cluster size > bucket size, that means the previous cluster has more
4405 * than 1 bucket, so just move half nums of bucket into the new cluster and
4406 * update the first_bh and header_bh if the insert bucket has been moved
4407 * to the new cluster.
4408 * 2. If cluster_size == bucket_size:
4409 * a) If the previous extent rec has more than one cluster and the insert
4410 * place isn't in the last cluster, copy the entire last cluster to the
4411 * new one. This time, we don't need to update the first_bh and header_bh
4412 * since they will not be moved into the new cluster.
4413 * b) Otherwise, move the bottom half of the xattrs in the last cluster into
4414 * the new one. And we set the extend flag to zero if the insert place is
4415 * moved into the new allocated cluster since no extend is needed.
4417 static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
4418 handle_t *handle,
4419 struct ocfs2_xattr_bucket *first,
4420 struct ocfs2_xattr_bucket *target,
4421 u64 new_blk,
4422 u32 prev_clusters,
4423 u32 *v_start,
4424 int *extend)
4426 int ret;
4428 mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n",
4429 (unsigned long long)bucket_blkno(first), prev_clusters,
4430 (unsigned long long)new_blk);
4432 if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
4433 ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
4434 handle,
4435 first, target,
4436 new_blk,
4437 prev_clusters,
4438 v_start);
4439 if (ret)
4440 mlog_errno(ret);
4441 } else {
4442 /* The start of the last cluster in the first extent */
4443 u64 last_blk = bucket_blkno(first) +
4444 ((prev_clusters - 1) *
4445 ocfs2_clusters_to_blocks(inode->i_sb, 1));
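/*
 * Example of the arithmetic above (made-up numbers): with 8 blocks
 * per cluster, a first bucket at block 8000 and prev_clusters = 3,
 * last_blk = 8000 + (3 - 1) * 8 = 8016, i.e. the first block of the
 * last cluster in the extent.
 */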
4447 if (prev_clusters > 1 && bucket_blkno(target) != last_blk) {
4448 ret = ocfs2_mv_xattr_buckets(inode, handle,
4449 bucket_blkno(first),
4450 last_blk, new_blk, 0,
4451 v_start);
4452 if (ret)
4453 mlog_errno(ret);
4454 } else {
4455 ret = ocfs2_divide_xattr_cluster(inode, handle,
4456 last_blk, new_blk,
4457 v_start);
4458 if (ret)
4459 mlog_errno(ret);
4461 if ((bucket_blkno(target) == last_blk) && extend)
4462 *extend = 0;
4466 return ret;
4470 * Add a new cluster for xattr storage.
4472 * If the new cluster is contiguous with the previous one, it will be
4473 * appended to the same extent record, and num_clusters will be updated.
4474 * If not, we will insert a new extent for it and move some xattrs in
4475 * the last cluster into the new allocated one.
4476 * We also need to limit the maximum size of a btree leaf, otherwise we'll
4477 * lose the benefits of hashing because we'll have to search large leaves.
4478 * So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE (or clustersize,
4479 * if it's bigger).
4481 * first_bh is the first block of the previous extent rec and header_bh
4482 * indicates the bucket into which we will insert the new xattrs. They will be updated
4483 * when the header_bh is moved into the new cluster.
4485 static int ocfs2_add_new_xattr_cluster(struct inode *inode,
4486 struct buffer_head *root_bh,
4487 struct ocfs2_xattr_bucket *first,
4488 struct ocfs2_xattr_bucket *target,
4489 u32 *num_clusters,
4490 u32 prev_cpos,
4491 int *extend,
4492 struct ocfs2_xattr_set_ctxt *ctxt)
4494 int ret;
4495 u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
4496 u32 prev_clusters = *num_clusters;
4497 u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0;
4498 u64 block;
4499 handle_t *handle = ctxt->handle;
4500 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
4501 struct ocfs2_extent_tree et;
4503 mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, "
4504 "previous xattr blkno = %llu\n",
4505 (unsigned long long)OCFS2_I(inode)->ip_blkno,
4506 prev_cpos, (unsigned long long)bucket_blkno(first));
4508 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
4510 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
4511 OCFS2_JOURNAL_ACCESS_WRITE);
4512 if (ret < 0) {
4513 mlog_errno(ret);
4514 goto leave;
4517 ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac, 1,
4518 clusters_to_add, &bit_off, &num_bits);
4519 if (ret < 0) {
4520 if (ret != -ENOSPC)
4521 mlog_errno(ret);
4522 goto leave;
4525 BUG_ON(num_bits > clusters_to_add);
4527 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
4528 mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n",
4529 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
4531 if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
4532 (prev_clusters + num_bits) << osb->s_clustersize_bits <=
4533 OCFS2_MAX_XATTR_TREE_LEAF_SIZE) {
4535 * If this cluster is contiguous with the old one and
4536 * adding this new cluster, we don't surpass the limit of
4537 * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, cool. We will let it be
4538 * initialized and used like other buckets in the previous
4539 * cluster.
4540 * So add it as a contiguous one. The caller will handle
4541 * its init process.
4543 v_start = prev_cpos + prev_clusters;
4544 *num_clusters = prev_clusters + num_bits;
4545 mlog(0, "Add contiguous %u clusters to previous extent rec.\n",
4546 num_bits);
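/*
 * Sizing note (assuming OCFS2_MAX_XATTR_TREE_LEAF_SIZE is the usual
 * 64K): with 4K clusters an extent record may grow contiguously up to
 * (prev_clusters + num_bits) = 16 clusters; beyond that we take the
 * else branch below and start a new extent record even if the newly
 * allocated cluster happens to be physically contiguous.
 */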
4547 } else {
4548 ret = ocfs2_adjust_xattr_cross_cluster(inode,
4549 handle,
4550 first,
4551 target,
4552 block,
4553 prev_clusters,
4554 &v_start,
4555 extend);
4556 if (ret) {
4557 mlog_errno(ret);
4558 goto leave;
4562 mlog(0, "Insert %u clusters at block %llu for xattr at %u\n",
4563 num_bits, (unsigned long long)block, v_start);
4564 ret = ocfs2_insert_extent(handle, &et, v_start, block,
4565 num_bits, 0, ctxt->meta_ac);
4566 if (ret < 0) {
4567 mlog_errno(ret);
4568 goto leave;
4571 ret = ocfs2_journal_dirty(handle, root_bh);
4572 if (ret < 0)
4573 mlog_errno(ret);
4575 leave:
4576 return ret;
4580 * We are given an extent. 'first' is the bucket at the very front of
4581 * the extent. The extent has space for an additional bucket past
4582 * bucket_xh(first)->xh_num_buckets. 'target_blkno' is the block number
4583 * of the target bucket. We wish to shift every bucket past the target
4584 * down one, filling in that additional space. When we get back to the
4585 * target, we split the target between itself and the now-empty bucket
4586 * at target+1 (aka, target_blkno + blks_per_bucket).
4588 static int ocfs2_extend_xattr_bucket(struct inode *inode,
4589 handle_t *handle,
4590 struct ocfs2_xattr_bucket *first,
4591 u64 target_blk,
4592 u32 num_clusters)
4594 int ret, credits;
4595 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
4596 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
4597 u64 end_blk;
4598 u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);
4600 mlog(0, "extend xattr bucket in %llu, xattr extend rec starting "
4601 "from %llu, len = %u\n", (unsigned long long)target_blk,
4602 (unsigned long long)bucket_blkno(first), num_clusters);
4604 /* The extent must have room for an additional bucket */
4605 BUG_ON(new_bucket >=
4606 (num_clusters * ocfs2_xattr_buckets_per_cluster(osb)));
4608 /* end_blk points to the last existing bucket */
4609 end_blk = bucket_blkno(first) + ((new_bucket - 1) * blk_per_bucket);
4612 * end_blk is the start of the last existing bucket.
4613 * Thus, (end_blk - target_blk) covers the target bucket and
4614 * every bucket after it up to, but not including, the last
4615 * existing bucket. Then we add the last existing bucket, the
4616 * new bucket, and the first bucket (3 * blk_per_bucket).
4618 credits = (end_blk - target_blk) + (3 * blk_per_bucket) +
4619 handle->h_buffer_credits;
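/*
 * Worked example of the estimate above (made-up numbers, 1K blocks so
 * blk_per_bucket = 4): with 12 existing buckets (new_bucket = 12) and
 * the target at bucket index 5, end_blk - target_blk = (11 - 5) * 4 =
 * 24 blocks, plus 3 * 4 = 12 for the last existing bucket, the new
 * bucket and the first bucket, i.e. 36 credits on top of what the
 * handle already holds.
 */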
4620 ret = ocfs2_extend_trans(handle, credits);
4621 if (ret) {
4622 mlog_errno(ret);
4623 goto out;
4626 ret = ocfs2_xattr_bucket_journal_access(handle, first,
4627 OCFS2_JOURNAL_ACCESS_WRITE);
4628 if (ret) {
4629 mlog_errno(ret);
4630 goto out;
4633 while (end_blk != target_blk) {
4634 ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk,
4635 end_blk + blk_per_bucket, 0);
4636 if (ret)
4637 goto out;
4638 end_blk -= blk_per_bucket;
4641 /* Move half of the xattr in target_blkno to the next bucket. */
4642 ret = ocfs2_divide_xattr_bucket(inode, handle, target_blk,
4643 target_blk + blk_per_bucket, NULL, 0);
4645 le16_add_cpu(&bucket_xh(first)->xh_num_buckets, 1);
4646 ocfs2_xattr_bucket_journal_dirty(handle, first);
4648 out:
4649 return ret;
4653 * Add new xattr bucket in an extent record and adjust the buckets
4654 * accordingly. xb_bh is the ocfs2_xattr_block, and target is the
4655 * bucket we want to insert into.
4657 * In the easy case, we will move all the buckets after target down by
4658 * one. Half of target's xattrs will be moved to the next bucket.
4660 * If the current cluster is full, we'll allocate a new one. This may not
4661 * be contiguous. The underlying calls will make sure that there is
4662 * space for the insert, shifting buckets around if necessary.
4663 * 'target' may be moved by those calls.
4665 static int ocfs2_add_new_xattr_bucket(struct inode *inode,
4666 struct buffer_head *xb_bh,
4667 struct ocfs2_xattr_bucket *target,
4668 struct ocfs2_xattr_set_ctxt *ctxt)
4670 struct ocfs2_xattr_block *xb =
4671 (struct ocfs2_xattr_block *)xb_bh->b_data;
4672 struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
4673 struct ocfs2_extent_list *el = &xb_root->xt_list;
4674 u32 name_hash =
4675 le32_to_cpu(bucket_xh(target)->xh_entries[0].xe_name_hash);
4676 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
4677 int ret, num_buckets, extend = 1;
4678 u64 p_blkno;
4679 u32 e_cpos, num_clusters;
4680 /* The bucket at the front of the extent */
4681 struct ocfs2_xattr_bucket *first;
4683 mlog(0, "Add new xattr bucket starting from %llu\n",
4684 (unsigned long long)bucket_blkno(target));
4686 /* The first bucket of the original extent */
4687 first = ocfs2_xattr_bucket_new(inode);
4688 if (!first) {
4689 ret = -ENOMEM;
4690 mlog_errno(ret);
4691 goto out;
4694 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos,
4695 &num_clusters, el);
4696 if (ret) {
4697 mlog_errno(ret);
4698 goto out;
4701 ret = ocfs2_read_xattr_bucket(first, p_blkno);
4702 if (ret) {
4703 mlog_errno(ret);
4704 goto out;
4707 num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters;
4708 if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) {
4710 * This can move first+target if the target bucket moves
4711 * to the new extent.
4713 ret = ocfs2_add_new_xattr_cluster(inode,
4714 xb_bh,
4715 first,
4716 target,
4717 &num_clusters,
4718 e_cpos,
4719 &extend,
4720 ctxt);
4721 if (ret) {
4722 mlog_errno(ret);
4723 goto out;
4727 if (extend) {
4728 ret = ocfs2_extend_xattr_bucket(inode,
4729 ctxt->handle,
4730 first,
4731 bucket_blkno(target),
4732 num_clusters);
4733 if (ret)
4734 mlog_errno(ret);
4737 out:
4738 ocfs2_xattr_bucket_free(first);
4740 return ret;
4743 static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
4744 struct ocfs2_xattr_bucket *bucket,
4745 int offs)
4747 int block_off = offs >> inode->i_sb->s_blocksize_bits;
4749 offs = offs % inode->i_sb->s_blocksize;
4750 return bucket_block(bucket, block_off) + offs;
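/*
 * A usage sketch (illustration only, not part of ocfs2): this is how a
 * caller turns an entry's xe_name_offset into a pointer inside the
 * right block of the bucket.  With 1K blocks, an offset of 2600
 * resolves to byte 2600 % 1024 = 552 of bucket block 2600 >> 10 = 2.
 */
#if 0	/* never built, example only */
static void example_print_name(struct inode *inode,
			       struct ocfs2_xattr_bucket *bucket, int i)
{
	struct ocfs2_xattr_entry *xe = &bucket_xh(bucket)->xh_entries[i];
	char *name = ocfs2_xattr_bucket_get_val(inode, bucket,
					le16_to_cpu(xe->xe_name_offset));

	/* xe_name_len name bytes, then the padded value or value root. */
	mlog(0, "xattr name: %.*s\n", xe->xe_name_len, name);
}
#endif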
4754 * Handle the normal xattr set, including replace, delete and new.
4756 * Note: "local" indicates the real data's locality, so we can't judge
4757 * whether the value is stored inside the bucket just by its length.
4759 static void ocfs2_xattr_set_entry_normal(struct inode *inode,
4760 struct ocfs2_xattr_info *xi,
4761 struct ocfs2_xattr_search *xs,
4762 u32 name_hash,
4763 int local)
4765 struct ocfs2_xattr_entry *last, *xe;
4766 int name_len = strlen(xi->name);
4767 struct ocfs2_xattr_header *xh = xs->header;
4768 u16 count = le16_to_cpu(xh->xh_count), start;
4769 size_t blocksize = inode->i_sb->s_blocksize;
4770 char *val;
4771 size_t offs, size, new_size;
4773 last = &xh->xh_entries[count];
4774 if (!xs->not_found) {
4775 xe = xs->here;
4776 offs = le16_to_cpu(xe->xe_name_offset);
4777 if (ocfs2_xattr_is_local(xe))
4778 size = OCFS2_XATTR_SIZE(name_len) +
4779 OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
4780 else
4781 size = OCFS2_XATTR_SIZE(name_len) +
4782 OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
4785 * If the new value will be stored outside, xi->value has been
4786 * initialized as an empty ocfs2_xattr_value_root, and the same
4787 * goes with xi->value_len, so we can set new_size safely here.
4788 * See ocfs2_xattr_set_in_bucket.
4790 new_size = OCFS2_XATTR_SIZE(name_len) +
4791 OCFS2_XATTR_SIZE(xi->value_len);
4793 le16_add_cpu(&xh->xh_name_value_len, -size);
4794 if (xi->value) {
4795 if (new_size > size)
4796 goto set_new_name_value;
4798 /* Now replace the old value with new one. */
4799 if (local)
4800 xe->xe_value_size = cpu_to_le64(xi->value_len);
4801 else
4802 xe->xe_value_size = 0;
4804 val = ocfs2_xattr_bucket_get_val(inode,
4805 xs->bucket, offs);
4806 memset(val + OCFS2_XATTR_SIZE(name_len), 0,
4807 size - OCFS2_XATTR_SIZE(name_len));
4808 if (OCFS2_XATTR_SIZE(xi->value_len) > 0)
4809 memcpy(val + OCFS2_XATTR_SIZE(name_len),
4810 xi->value, xi->value_len);
4812 le16_add_cpu(&xh->xh_name_value_len, new_size);
4813 ocfs2_xattr_set_local(xe, local);
4814 return;
4815 } else {
4817 * Remove the old entry if there is more than one.
4818 * We don't remove the last entry so that we can
4819 * use it to indicate the hash value of the empty
4820 * bucket.
4822 last -= 1;
4823 le16_add_cpu(&xh->xh_count, -1);
4824 if (xh->xh_count) {
4825 memmove(xe, xe + 1,
4826 (void *)last - (void *)xe);
4827 memset(last, 0,
4828 sizeof(struct ocfs2_xattr_entry));
4829 } else
4830 xh->xh_free_start =
4831 cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
4833 return;
4835 } else {
4836 /* find a new entry for insert. */
4837 int low = 0, high = count - 1, tmp;
4838 struct ocfs2_xattr_entry *tmp_xe;
4840 while (low <= high && count) {
4841 tmp = (low + high) / 2;
4842 tmp_xe = &xh->xh_entries[tmp];
4844 if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
4845 low = tmp + 1;
4846 else if (name_hash <
4847 le32_to_cpu(tmp_xe->xe_name_hash))
4848 high = tmp - 1;
4849 else {
4850 low = tmp;
4851 break;
4855 xe = &xh->xh_entries[low];
4856 if (low != count)
4857 memmove(xe + 1, xe, (void *)last - (void *)xe);
4859 le16_add_cpu(&xh->xh_count, 1);
4860 memset(xe, 0, sizeof(struct ocfs2_xattr_entry));
4861 xe->xe_name_hash = cpu_to_le32(name_hash);
4862 xe->xe_name_len = name_len;
4863 ocfs2_xattr_set_type(xe, xi->name_index);
4866 set_new_name_value:
4867 /* Insert the new name+value. */
4868 size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len);
4871 * We must make sure that the name/value pair
4872 * exists in the same block.
4874 offs = le16_to_cpu(xh->xh_free_start);
4875 start = offs - size;
4877 if (start >> inode->i_sb->s_blocksize_bits !=
4878 (offs - 1) >> inode->i_sb->s_blocksize_bits) {
4879 offs = offs - offs % blocksize;
4880 xh->xh_free_start = cpu_to_le16(offs);
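/*
 * Example of the adjustment above (made-up numbers, 1K blocks): with
 * xh_free_start = 2100 and a new pair needing size = 200, start = 1900
 * lies in block 1 while offs - 1 = 2099 lies in block 2, so the pair
 * would straddle a block boundary.  offs is therefore pulled back to
 * the boundary at 2048 and the pair lands at 2048 - 200 = 1848,
 * entirely inside block 1.
 */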
4883 val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size);
4884 xe->xe_name_offset = cpu_to_le16(offs - size);
4886 memset(val, 0, size);
4887 memcpy(val, xi->name, name_len);
4888 memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len);
4890 xe->xe_value_size = cpu_to_le64(xi->value_len);
4891 ocfs2_xattr_set_local(xe, local);
4892 xs->here = xe;
4893 le16_add_cpu(&xh->xh_free_start, -size);
4894 le16_add_cpu(&xh->xh_name_value_len, size);
4896 return;
4900 * Set the xattr entry in the specified bucket.
4901 * The bucket is indicated by xs->bucket and it should have enough
4902 * space for the xattr insertion.
4904 static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode,
4905 handle_t *handle,
4906 struct ocfs2_xattr_info *xi,
4907 struct ocfs2_xattr_search *xs,
4908 u32 name_hash,
4909 int local)
4911 int ret;
4912 u64 blkno;
4914 mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n",
4915 (unsigned long)xi->value_len, xi->name_index,
4916 (unsigned long long)bucket_blkno(xs->bucket));
4918 if (!xs->bucket->bu_bhs[1]) {
4919 blkno = bucket_blkno(xs->bucket);
4920 ocfs2_xattr_bucket_relse(xs->bucket);
4921 ret = ocfs2_read_xattr_bucket(xs->bucket, blkno);
4922 if (ret) {
4923 mlog_errno(ret);
4924 goto out;
4928 ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
4929 OCFS2_JOURNAL_ACCESS_WRITE);
4930 if (ret < 0) {
4931 mlog_errno(ret);
4932 goto out;
4935 ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local);
4936 ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
4938 out:
4939 return ret;
4943 * Truncate the specified xe_off entry in the xattr bucket.
4944 * len is the new length of the xattr value.
4945 * Both the ocfs2_xattr_value_root and the entry will be updated here.
4947 * Copy the new updated xe and xe_value_root to new_xe and new_xv if needed.
4949 static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
4950 struct ocfs2_xattr_bucket *bucket,
4951 int xe_off,
4952 int len,
4953 struct ocfs2_xattr_set_ctxt *ctxt)
4955 int ret, offset;
4956 u64 value_blk;
4957 struct ocfs2_xattr_entry *xe;
4958 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
4959 size_t blocksize = inode->i_sb->s_blocksize;
4960 struct ocfs2_xattr_value_buf vb = {
4961 .vb_access = ocfs2_journal_access,
4964 xe = &xh->xh_entries[xe_off];
4966 BUG_ON(!xe || ocfs2_xattr_is_local(xe));
4968 offset = le16_to_cpu(xe->xe_name_offset) +
4969 OCFS2_XATTR_SIZE(xe->xe_name_len);
4971 value_blk = offset / blocksize;
4973 /* We don't allow an ocfs2_xattr_value_root to be split across blocks. */
4974 BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize);
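/*
 * Example of the check above (made-up numbers, 1K blocks): a name at
 * xe_name_offset = 1800 with OCFS2_XATTR_SIZE(xe_name_len) = 24 puts
 * the value root at offset 1824, so value_blk = 1; the BUG_ON asserts
 * that all OCFS2_XATTR_ROOT_SIZE bytes of the root still end before
 * offset 2048, i.e. vb.vb_bh below really is a single buffer_head.
 */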
4976 vb.vb_bh = bucket->bu_bhs[value_blk];
4977 BUG_ON(!vb.vb_bh);
4979 vb.vb_xv = (struct ocfs2_xattr_value_root *)
4980 (vb.vb_bh->b_data + offset % blocksize);
4983 * From here on out we have to dirty the bucket. The generic
4984 * value calls only modify one of the bucket's bhs, but we need
4985 * to send the bucket at once. So if they error, they *could* have
4986 * modified something. We have to assume they did, and dirty
4987 * the whole bucket. This leaves us in a consistent state.
4989 mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n",
4990 xe_off, (unsigned long long)bucket_blkno(bucket), len);
4991 ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
4992 if (ret) {
4993 mlog_errno(ret);
4994 goto out;
4997 ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
4998 OCFS2_JOURNAL_ACCESS_WRITE);
4999 if (ret) {
5000 mlog_errno(ret);
5001 goto out;
5004 xe->xe_value_size = cpu_to_le64(len);
5006 ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);
5008 out:
5009 return ret;
5012 static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode,
5013 struct ocfs2_xattr_search *xs,
5014 int len,
5015 struct ocfs2_xattr_set_ctxt *ctxt)
5017 int ret, offset;
5018 struct ocfs2_xattr_entry *xe = xs->here;
5019 struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base;
5021 BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe));
5023 offset = xe - xh->xh_entries;
5024 ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket,
5025 offset, len, ctxt);
5026 if (ret)
5027 mlog_errno(ret);
5029 return ret;
5032 static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
5033 handle_t *handle,
5034 struct ocfs2_xattr_search *xs,
5035 char *val,
5036 int value_len)
5038 int ret, offset, block_off;
5039 struct ocfs2_xattr_value_root *xv;
5040 struct ocfs2_xattr_entry *xe = xs->here;
5041 struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
5042 void *base;
5043 struct ocfs2_xattr_value_buf vb = {
5044 .vb_access = ocfs2_journal_access,
5047 BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe));
5049 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh,
5050 xe - xh->xh_entries,
5051 &block_off,
5052 &offset);
5053 if (ret) {
5054 mlog_errno(ret);
5055 goto out;
5058 base = bucket_block(xs->bucket, block_off);
5059 xv = (struct ocfs2_xattr_value_root *)(base + offset +
5060 OCFS2_XATTR_SIZE(xe->xe_name_len));
5062 vb.vb_xv = xv;
5063 vb.vb_bh = xs->bucket->bu_bhs[block_off];
5064 ret = __ocfs2_xattr_set_value_outside(inode, handle,
5065 &vb, val, value_len);
5066 if (ret)
5067 mlog_errno(ret);
5068 out:
5069 return ret;
5072 static int ocfs2_rm_xattr_cluster(struct inode *inode,
5073 struct buffer_head *root_bh,
5074 u64 blkno,
5075 u32 cpos,
5076 u32 len,
5077 void *para)
5079 int ret;
5080 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5081 struct inode *tl_inode = osb->osb_tl_inode;
5082 handle_t *handle;
5083 struct ocfs2_xattr_block *xb =
5084 (struct ocfs2_xattr_block *)root_bh->b_data;
5085 struct ocfs2_alloc_context *meta_ac = NULL;
5086 struct ocfs2_cached_dealloc_ctxt dealloc;
5087 struct ocfs2_extent_tree et;
5089 ret = ocfs2_iterate_xattr_buckets(inode, blkno, len,
5090 ocfs2_delete_xattr_in_bucket, para);
5091 if (ret) {
5092 mlog_errno(ret);
5093 return ret;
5096 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
5098 ocfs2_init_dealloc_ctxt(&dealloc);
5100 mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n",
5101 cpos, len, (unsigned long long)blkno);
5103 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
5104 len);
5106 ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
5107 if (ret) {
5108 mlog_errno(ret);
5109 return ret;
5112 mutex_lock(&tl_inode->i_mutex);
5114 if (ocfs2_truncate_log_needs_flush(osb)) {
5115 ret = __ocfs2_flush_truncate_log(osb);
5116 if (ret < 0) {
5117 mlog_errno(ret);
5118 goto out;
5122 handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb));
5123 if (IS_ERR(handle)) {
5124 ret = PTR_ERR(handle);
5125 mlog_errno(ret);
5126 goto out;
5129 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
5130 OCFS2_JOURNAL_ACCESS_WRITE);
5131 if (ret) {
5132 mlog_errno(ret);
5133 goto out_commit;
5136 ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac,
5137 &dealloc);
5138 if (ret) {
5139 mlog_errno(ret);
5140 goto out_commit;
5143 le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
5145 ret = ocfs2_journal_dirty(handle, root_bh);
5146 if (ret) {
5147 mlog_errno(ret);
5148 goto out_commit;
5151 ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
5152 if (ret)
5153 mlog_errno(ret);
5155 out_commit:
5156 ocfs2_commit_trans(osb, handle);
5157 out:
5158 ocfs2_schedule_truncate_log_flush(osb, 1);
5160 mutex_unlock(&tl_inode->i_mutex);
5162 if (meta_ac)
5163 ocfs2_free_alloc_context(meta_ac);
5165 ocfs2_run_deallocs(osb, &dealloc);
5167 return ret;
5170 static void ocfs2_xattr_bucket_remove_xs(struct inode *inode,
5171 handle_t *handle,
5172 struct ocfs2_xattr_search *xs)
5174 struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
5175 struct ocfs2_xattr_entry *last = &xh->xh_entries[
5176 le16_to_cpu(xh->xh_count) - 1];
5177 int ret = 0;
5179 ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
5180 OCFS2_JOURNAL_ACCESS_WRITE);
5181 if (ret) {
5182 mlog_errno(ret);
5183 return;
5186 /* Remove the old entry. */
5187 memmove(xs->here, xs->here + 1,
5188 (void *)last - (void *)xs->here);
5189 memset(last, 0, sizeof(struct ocfs2_xattr_entry));
5190 le16_add_cpu(&xh->xh_count, -1);
5192 ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
5196 * Set the xattr name/value in the bucket specified in xs.
5198 * As the new value in xi may be stored in the bucket or in an outside cluster,
5199 * we divide the whole process into 4 steps:
5200 * 1. Insert the name/value in the bucket (ocfs2_xattr_set_entry_in_bucket).
5201 * 2. Truncate the outside cluster (ocfs2_xattr_bucket_value_truncate_xs).
5202 * 3. Set the value in the outside cluster (ocfs2_xattr_bucket_set_value_outside).
5203 * 4. If the clusters for the new outside value can't be allocated, free the
5204 * xattr entry we allocated in step 1.
5206 static int ocfs2_xattr_set_in_bucket(struct inode *inode,
5207 struct ocfs2_xattr_info *xi,
5208 struct ocfs2_xattr_search *xs,
5209 struct ocfs2_xattr_set_ctxt *ctxt)
5211 int ret, local = 1;
5212 size_t value_len;
5213 char *val = (char *)xi->value;
5214 struct ocfs2_xattr_entry *xe = xs->here;
5215 u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name,
5216 strlen(xi->name));
5218 if (!xs->not_found && !ocfs2_xattr_is_local(xe)) {
5220 * We need to truncate the xattr storage first.
5222 * If both the old and new value are stored to
5223 * outside block, we only need to truncate
5224 * the storage and then set the value outside.
5226 * If the new value should be stored within block,
5227 * we should free all the outside block first and
5228 * the modification to the xattr block will be done
5229 * by following steps.
5231 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
5232 value_len = xi->value_len;
5233 else
5234 value_len = 0;
5236 ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
5237 value_len,
5238 ctxt);
5239 if (ret)
5240 goto out;
5242 if (value_len)
5243 goto set_value_outside;
5246 value_len = xi->value_len;
5247 /* So we have to handle the inside block change now. */
5248 if (value_len > OCFS2_XATTR_INLINE_SIZE) {
5250 * If the new value will be stored outside of block,
5251 * initialize a new empty value root and insert it first.
5253 local = 0;
5254 xi->value = &def_xv;
5255 xi->value_len = OCFS2_XATTR_ROOT_SIZE;
5258 ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs,
5259 name_hash, local);
5260 if (ret) {
5261 mlog_errno(ret);
5262 goto out;
5265 if (value_len <= OCFS2_XATTR_INLINE_SIZE)
5266 goto out;
5268 /* allocate the space now for the outside block storage. */
5269 ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
5270 value_len, ctxt);
5271 if (ret) {
5272 mlog_errno(ret);
5274 if (xs->not_found) {
5276 * We can't allocate enough clusters for outside
5277 * storage and we have allocated xattr already,
5278 * so need to remove it.
5280 ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs);
5282 goto out;
5285 set_value_outside:
5286 ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle,
5287 xs, val, value_len);
5288 out:
5289 return ret;
5293 * Check whether the xattr bucket is filled up with entries of the same hash.
5294 * If we want to insert an xattr with that same hash, return -ENOSPC.
5295 * If we want to insert an xattr with a different hash value, go ahead;
5296 * ocfs2_divide_xattr_bucket will handle the split.
5298 static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
5299 struct ocfs2_xattr_bucket *bucket,
5300 const char *name)
5302 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
5303 u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
5305 if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash))
5306 return 0;
5308 if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash ==
5309 xh->xh_entries[0].xe_name_hash) {
5310 mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, "
5311 "hash = %u\n",
5312 (unsigned long long)bucket_blkno(bucket),
5313 le32_to_cpu(xh->xh_entries[0].xe_name_hash));
5314 return -ENOSPC;
5317 return 0;
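/*
 * Why comparing only the first and last entries is enough (entries in a
 * bucket are kept sorted by xe_name_hash): if those two already share a
 * hash, every entry in between does too, so the bucket holds nothing
 * but collisions for that hash and cannot be divided to make room for
 * one more.  A sketch of the same test (illustration only, not part of
 * ocfs2):
 */
#if 0	/* never built, example only */
static int example_bucket_is_single_hash(struct ocfs2_xattr_header *xh)
{
	u16 last = le16_to_cpu(xh->xh_count) - 1;

	/* Sorted by hash, so first == last implies all entries match. */
	return xh->xh_entries[0].xe_name_hash ==
	       xh->xh_entries[last].xe_name_hash;
}
#endif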
5320 static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
5321 struct ocfs2_xattr_info *xi,
5322 struct ocfs2_xattr_search *xs,
5323 struct ocfs2_xattr_set_ctxt *ctxt)
5325 struct ocfs2_xattr_header *xh;
5326 struct ocfs2_xattr_entry *xe;
5327 u16 count, header_size, xh_free_start;
5328 int free, max_free, need, old;
5329 size_t value_size = 0, name_len = strlen(xi->name);
5330 size_t blocksize = inode->i_sb->s_blocksize;
5331 int ret, allocation = 0;
5333 mlog_entry("Set xattr %s in xattr index block\n", xi->name);
5335 try_again:
5336 xh = xs->header;
5337 count = le16_to_cpu(xh->xh_count);
5338 xh_free_start = le16_to_cpu(xh->xh_free_start);
5339 header_size = sizeof(struct ocfs2_xattr_header) +
5340 count * sizeof(struct ocfs2_xattr_entry);
5341 max_free = OCFS2_XATTR_BUCKET_SIZE - header_size -
5342 le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP;
5344 mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size "
5345 "of %u which exceed block size\n",
5346 (unsigned long long)bucket_blkno(xs->bucket),
5347 header_size);
5349 if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE)
5350 value_size = OCFS2_XATTR_ROOT_SIZE;
5351 else if (xi->value)
5352 value_size = OCFS2_XATTR_SIZE(xi->value_len);
5354 if (xs->not_found)
5355 need = sizeof(struct ocfs2_xattr_entry) +
5356 OCFS2_XATTR_SIZE(name_len) + value_size;
5357 else {
5358 need = value_size + OCFS2_XATTR_SIZE(name_len);
5361 * We only replace the old value if the new length is smaller
5362 * than the old one. Otherwise we will allocate new space in the
5363 * bucket to store it.
5365 xe = xs->here;
5366 if (ocfs2_xattr_is_local(xe))
5367 old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
5368 else
5369 old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
5371 if (old >= value_size)
5372 need = 0;
5375 free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP;
5377 * We need to make sure the new name/value pair
5378 * can exist in the same block.
5380 if (xh_free_start % blocksize < need)
5381 free -= xh_free_start % blocksize;
5383 mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, "
5384 "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len ="
5385 " %u\n", xs->not_found,
5386 (unsigned long long)bucket_blkno(xs->bucket),
5387 free, need, max_free, le16_to_cpu(xh->xh_free_start),
5388 le16_to_cpu(xh->xh_name_value_len));
5390 if (free < need ||
5391 (xs->not_found &&
5392 count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) {
5393 if (need <= max_free &&
5394 count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) {
5396 * We can create the space by defragmenting the bucket. Since
5397 * only the name/value data will be moved, the xe cached in xs
5398 * shouldn't change.
5400 ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
5401 xs->bucket);
5402 if (ret) {
5403 mlog_errno(ret);
5404 goto out;
5407 xh_free_start = le16_to_cpu(xh->xh_free_start);
5408 free = xh_free_start - header_size
5409 - OCFS2_XATTR_HEADER_GAP;
5410 if (xh_free_start % blocksize < need)
5411 free -= xh_free_start % blocksize;
5413 if (free >= need)
5414 goto xattr_set;
5416 mlog(0, "Can't get enough space for xattr insert by "
5417 "defragment. Need %u bytes, but we have %d, so "
5418 "allocate new bucket for it.\n", need, free);
5422 * We have to add new buckets or clusters and one
5423 * allocation should leave us enough space for insert.
5425 BUG_ON(allocation);
5428 * We do not allow overlapping hash ranges between buckets, so
5429 * the maximum number of collisions we can tolerate is one
5430 * bucket's worth. Check here whether we need to add a new
5431 * bucket for the insert.
5433 ret = ocfs2_check_xattr_bucket_collision(inode,
5434 xs->bucket,
5435 xi->name);
5436 if (ret) {
5437 mlog_errno(ret);
5438 goto out;
5441 ret = ocfs2_add_new_xattr_bucket(inode,
5442 xs->xattr_bh,
5443 xs->bucket,
5444 ctxt);
5445 if (ret) {
5446 mlog_errno(ret);
5447 goto out;
5451 * ocfs2_add_new_xattr_bucket() will have updated
5452 * xs->bucket if it moved, but it will not have updated
5453 * any of the other search fields. Thus, we drop it and
5454 * re-search. Everything should be cached, so it'll be
5455 * quick.
5457 ocfs2_xattr_bucket_relse(xs->bucket);
5458 ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
5459 xi->name_index,
5460 xi->name, xs);
5461 if (ret && ret != -ENODATA)
5462 goto out;
5463 xs->not_found = ret;
5464 allocation = 1;
5465 goto try_again;
5468 xattr_set:
5469 ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt);
5470 out:
5471 mlog_exit(ret);
5472 return ret;
5475 static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
5476 struct ocfs2_xattr_bucket *bucket,
5477 void *para)
5479 int ret = 0, ref_credits;
5480 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
5481 u16 i;
5482 struct ocfs2_xattr_entry *xe;
5483 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5484 struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,};
5485 int credits = ocfs2_remove_extent_credits(osb->sb) +
5486 ocfs2_blocks_per_xattr_bucket(inode->i_sb);
5487 struct ocfs2_xattr_value_root *xv;
5488 struct ocfs2_rm_xattr_bucket_para *args =
5489 (struct ocfs2_rm_xattr_bucket_para *)para;
5491 ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
5493 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
5494 xe = &xh->xh_entries[i];
5495 if (ocfs2_xattr_is_local(xe))
5496 continue;
5498 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
5499 i, &xv, NULL);
5501 ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
5502 args->ref_ci,
5503 args->ref_root_bh,
5504 &ctxt.meta_ac,
5505 &ref_credits);
5507 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
5508 if (IS_ERR(ctxt.handle)) {
5509 ret = PTR_ERR(ctxt.handle);
5510 mlog_errno(ret);
5511 break;
5514 ret = ocfs2_xattr_bucket_value_truncate(inode, bucket,
5515 i, 0, &ctxt);
5517 ocfs2_commit_trans(osb, ctxt.handle);
5518 if (ctxt.meta_ac) {
5519 ocfs2_free_alloc_context(ctxt.meta_ac);
5520 ctxt.meta_ac = NULL;
5522 if (ret) {
5523 mlog_errno(ret);
5524 break;
5528 if (ctxt.meta_ac)
5529 ocfs2_free_alloc_context(ctxt.meta_ac);
5530 ocfs2_schedule_truncate_log_flush(osb, 1);
5531 ocfs2_run_deallocs(osb, &ctxt.dealloc);
5532 return ret;
5536 * Whenever we modify an xattr value root in the bucket (e.g. CoW
5537 * or changing an extent record flag), we need to recalculate the
5538 * metaecc for the whole bucket, so that is done here.
5540 * Note:
5541 * The caller has to provide the extra credits for this.
5543 static int ocfs2_xattr_bucket_post_refcount(struct inode *inode,
5544 handle_t *handle,
5545 void *para)
5547 int ret;
5548 struct ocfs2_xattr_bucket *bucket =
5549 (struct ocfs2_xattr_bucket *)para;
5551 ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
5552 OCFS2_JOURNAL_ACCESS_WRITE);
5553 if (ret) {
5554 mlog_errno(ret);
5555 return ret;
5558 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
5560 return 0;
5564 * Special action we need if the xattr value is refcounted.
5566 * 1. If the xattr is refcounted, lock the tree.
5567 * 2. CoW the xattr if we are setting the new value and the value
5568 * will be stored outside.
5569 * 3. Otherwise, decrease_refcount will work for us, so just lock
5570 * the refcount tree and calculate the meta and credits we need.
5572 * We have to do the CoW before ocfs2_init_xattr_set_ctxt since CoW
5573 * currently runs as a complete transaction of its own, while
5574 * ocfs2_init_xattr_set_ctxt also locks the allocators and would
5575 * deadlock with it. So we CoW the whole xattr value.
5577 static int ocfs2_prepare_refcount_xattr(struct inode *inode,
5578 struct ocfs2_dinode *di,
5579 struct ocfs2_xattr_info *xi,
5580 struct ocfs2_xattr_search *xis,
5581 struct ocfs2_xattr_search *xbs,
5582 struct ocfs2_refcount_tree **ref_tree,
5583 int *meta_add,
5584 int *credits)
5586 int ret = 0;
5587 struct ocfs2_xattr_block *xb;
5588 struct ocfs2_xattr_entry *xe;
5589 char *base;
5590 u32 p_cluster, num_clusters;
5591 unsigned int ext_flags;
5592 int name_offset, name_len;
5593 struct ocfs2_xattr_value_buf vb;
5594 struct ocfs2_xattr_bucket *bucket = NULL;
5595 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5596 struct ocfs2_post_refcount refcount;
5597 struct ocfs2_post_refcount *p = NULL;
5598 struct buffer_head *ref_root_bh = NULL;
5600 if (!xis->not_found) {
5601 xe = xis->here;
5602 name_offset = le16_to_cpu(xe->xe_name_offset);
5603 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
5604 base = xis->base;
5605 vb.vb_bh = xis->inode_bh;
5606 vb.vb_access = ocfs2_journal_access_di;
5607 } else {
5608 int i, block_off = 0;
5609 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
5610 xe = xbs->here;
5611 name_offset = le16_to_cpu(xe->xe_name_offset);
5612 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
5613 i = xbs->here - xbs->header->xh_entries;
5615 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
5616 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
5617 bucket_xh(xbs->bucket),
5618 i, &block_off,
5619 &name_offset);
5620 if (ret) {
5621 mlog_errno(ret);
5622 goto out;
5624 base = bucket_block(xbs->bucket, block_off);
5625 vb.vb_bh = xbs->bucket->bu_bhs[block_off];
5626 vb.vb_access = ocfs2_journal_access;
5628 if (ocfs2_meta_ecc(osb)) {
5629 /* Create parameters for ocfs2_post_refcount. */
5630 bucket = xbs->bucket;
5631 refcount.credits = bucket->bu_blocks;
5632 refcount.para = bucket;
5633 refcount.func =
5634 ocfs2_xattr_bucket_post_refcount;
5635 p = &refcount;
5637 } else {
5638 base = xbs->base;
5639 vb.vb_bh = xbs->xattr_bh;
5640 vb.vb_access = ocfs2_journal_access_xb;
5644 if (ocfs2_xattr_is_local(xe))
5645 goto out;
5647 vb.vb_xv = (struct ocfs2_xattr_value_root *)
5648 (base + name_offset + name_len);
5650 ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
5651 &num_clusters, &vb.vb_xv->xr_list,
5652 &ext_flags);
5653 if (ret) {
5654 mlog_errno(ret);
5655 goto out;
5659 * We just need to check the 1st extent record, since we always
5660 * CoW the whole xattr. So there shouldn't be an xattr whose later
5661 * extent recs are REFCOUNTED while the 1st one is not.
5663 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
5664 goto out;
5666 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
5667 1, ref_tree, &ref_root_bh);
5668 if (ret) {
5669 mlog_errno(ret);
5670 goto out;
5674 * If we are deleting the xattr or the new value will be stored inside,
5675 * cool, leave it there; the xattr truncate process will remove the
5676 * clusters for us (it still needs the refcount tree lock and the meta
5677 * and credits). The worst case is that every cluster truncate splits
5678 * the refcount tree and turns the original extent into 3, so we will
5679 * need at most 2 extra extent recs per cluster.
5681 if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) {
5683 ret = ocfs2_refcounted_xattr_delete_need(inode,
5684 &(*ref_tree)->rf_ci,
5685 ref_root_bh, vb.vb_xv,
5686 meta_add, credits);
5687 if (ret)
5688 mlog_errno(ret);
5689 goto out;
5692 ret = ocfs2_refcount_cow_xattr(inode, di, &vb,
5693 *ref_tree, ref_root_bh, 0,
5694 le32_to_cpu(vb.vb_xv->xr_clusters), p);
5695 if (ret)
5696 mlog_errno(ret);
5698 out:
5699 brelse(ref_root_bh);
5700 return ret;
5704 * Add the REFCOUNTED flag to all the extent recs in an ocfs2_xattr_value_root.
5705 * The physical clusters will be added to the refcount tree.
5707 static int ocfs2_xattr_value_attach_refcount(struct inode *inode,
5708 struct ocfs2_xattr_value_root *xv,
5709 struct ocfs2_extent_tree *value_et,
5710 struct ocfs2_caching_info *ref_ci,
5711 struct buffer_head *ref_root_bh,
5712 struct ocfs2_cached_dealloc_ctxt *dealloc,
5713 struct ocfs2_post_refcount *refcount)
5715 int ret = 0;
5716 u32 clusters = le32_to_cpu(xv->xr_clusters);
5717 u32 cpos, p_cluster, num_clusters;
5718 struct ocfs2_extent_list *el = &xv->xr_list;
5719 unsigned int ext_flags;
5721 cpos = 0;
5722 while (cpos < clusters) {
5723 ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
5724 &num_clusters, el, &ext_flags);
5726 cpos += num_clusters;
5727 if ((ext_flags & OCFS2_EXT_REFCOUNTED))
5728 continue;
5730 BUG_ON(!p_cluster);
5732 ret = ocfs2_add_refcount_flag(inode, value_et,
5733 ref_ci, ref_root_bh,
5734 cpos - num_clusters,
5735 p_cluster, num_clusters,
5736 dealloc, refcount);
5737 if (ret) {
5738 mlog_errno(ret);
5739 break;
5743 return ret;
5747 * Given a normal ocfs2_xattr_header, refcount all the entries which
5748 * have value stored outside.
5749 * Used for xattrs stored in inode and ocfs2_xattr_block.
5751 static int ocfs2_xattr_attach_refcount_normal(struct inode *inode,
5752 struct ocfs2_xattr_value_buf *vb,
5753 struct ocfs2_xattr_header *header,
5754 struct ocfs2_caching_info *ref_ci,
5755 struct buffer_head *ref_root_bh,
5756 struct ocfs2_cached_dealloc_ctxt *dealloc)
5759 struct ocfs2_xattr_entry *xe;
5760 struct ocfs2_xattr_value_root *xv;
5761 struct ocfs2_extent_tree et;
5762 int i, ret = 0;
5764 for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
5765 xe = &header->xh_entries[i];
5767 if (ocfs2_xattr_is_local(xe))
5768 continue;
5770 xv = (struct ocfs2_xattr_value_root *)((void *)header +
5771 le16_to_cpu(xe->xe_name_offset) +
5772 OCFS2_XATTR_SIZE(xe->xe_name_len));
5774 vb->vb_xv = xv;
5775 ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
5777 ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et,
5778 ref_ci, ref_root_bh,
5779 dealloc, NULL);
5780 if (ret) {
5781 mlog_errno(ret);
5782 break;
5786 return ret;
5789 static int ocfs2_xattr_inline_attach_refcount(struct inode *inode,
5790 struct buffer_head *fe_bh,
5791 struct ocfs2_caching_info *ref_ci,
5792 struct buffer_head *ref_root_bh,
5793 struct ocfs2_cached_dealloc_ctxt *dealloc)
5795 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
5796 struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *)
5797 (fe_bh->b_data + inode->i_sb->s_blocksize -
5798 le16_to_cpu(di->i_xattr_inline_size));
5799 struct ocfs2_xattr_value_buf vb = {
5800 .vb_bh = fe_bh,
5801 .vb_access = ocfs2_journal_access_di,
5804 return ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
5805 ref_ci, ref_root_bh, dealloc);
5808 struct ocfs2_xattr_tree_value_refcount_para {
5809 struct ocfs2_caching_info *ref_ci;
5810 struct buffer_head *ref_root_bh;
5811 struct ocfs2_cached_dealloc_ctxt *dealloc;
5814 static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
5815 struct ocfs2_xattr_bucket *bucket,
5816 int offset,
5817 struct ocfs2_xattr_value_root **xv,
5818 struct buffer_head **bh)
5820 int ret, block_off, name_offset;
5821 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
5822 struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
5823 void *base;
5825 ret = ocfs2_xattr_bucket_get_name_value(sb,
5826 bucket_xh(bucket),
5827 offset,
5828 &block_off,
5829 &name_offset);
5830 if (ret) {
5831 mlog_errno(ret);
5832 goto out;
5835 base = bucket_block(bucket, block_off);
5837 *xv = (struct ocfs2_xattr_value_root *)(base + name_offset +
5838 OCFS2_XATTR_SIZE(xe->xe_name_len));
5840 if (bh)
5841 *bh = bucket->bu_bhs[block_off];
5842 out:
5843 return ret;
5847 * For a given xattr bucket, refcount all the entries which
5848 * have value stored outside.
5850 static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
5851 struct ocfs2_xattr_bucket *bucket,
5852 void *para)
5854 int i, ret = 0;
5855 struct ocfs2_extent_tree et;
5856 struct ocfs2_xattr_tree_value_refcount_para *ref =
5857 (struct ocfs2_xattr_tree_value_refcount_para *)para;
5858 struct ocfs2_xattr_header *xh =
5859 (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
5860 struct ocfs2_xattr_entry *xe;
5861 struct ocfs2_xattr_value_buf vb = {
5862 .vb_access = ocfs2_journal_access,
5864 struct ocfs2_post_refcount refcount = {
5865 .credits = bucket->bu_blocks,
5866 .para = bucket,
5867 .func = ocfs2_xattr_bucket_post_refcount,
5869 struct ocfs2_post_refcount *p = NULL;
5871 /* We only need post_refcount if we support metaecc. */
5872 if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
5873 p = &refcount;
5875 mlog(0, "refcount bucket %llu, count = %u\n",
5876 (unsigned long long)bucket_blkno(bucket),
5877 le16_to_cpu(xh->xh_count));
5878 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
5879 xe = &xh->xh_entries[i];
5881 if (ocfs2_xattr_is_local(xe))
5882 continue;
5884 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i,
5885 &vb.vb_xv, &vb.vb_bh);
5886 if (ret) {
5887 mlog_errno(ret);
5888 break;
5891 ocfs2_init_xattr_value_extent_tree(&et,
5892 INODE_CACHE(inode), &vb);
5894 ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv,
5895 &et, ref->ref_ci,
5896 ref->ref_root_bh,
5897 ref->dealloc, p);
5898 if (ret) {
5899 mlog_errno(ret);
5900 break;
5904 return ret;
5908 static int ocfs2_refcount_xattr_tree_rec(struct inode *inode,
5909 struct buffer_head *root_bh,
5910 u64 blkno, u32 cpos, u32 len, void *para)
5912 return ocfs2_iterate_xattr_buckets(inode, blkno, len,
5913 ocfs2_xattr_bucket_value_refcount,
5914 para);
5917 static int ocfs2_xattr_block_attach_refcount(struct inode *inode,
5918 struct buffer_head *blk_bh,
5919 struct ocfs2_caching_info *ref_ci,
5920 struct buffer_head *ref_root_bh,
5921 struct ocfs2_cached_dealloc_ctxt *dealloc)
5923 int ret = 0;
5924 struct ocfs2_xattr_block *xb =
5925 (struct ocfs2_xattr_block *)blk_bh->b_data;
5927 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
5928 struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
5929 struct ocfs2_xattr_value_buf vb = {
5930 .vb_bh = blk_bh,
5931 .vb_access = ocfs2_journal_access_xb,
5934 ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
5935 ref_ci, ref_root_bh,
5936 dealloc);
5937 } else {
5938 struct ocfs2_xattr_tree_value_refcount_para para = {
5939 .ref_ci = ref_ci,
5940 .ref_root_bh = ref_root_bh,
5941 .dealloc = dealloc,
5944 ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
5945 ocfs2_refcount_xattr_tree_rec,
5946 &para);
5949 return ret;
5952 int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
5953 struct buffer_head *fe_bh,
5954 struct ocfs2_caching_info *ref_ci,
5955 struct buffer_head *ref_root_bh,
5956 struct ocfs2_cached_dealloc_ctxt *dealloc)
5958 int ret = 0;
5959 struct ocfs2_inode_info *oi = OCFS2_I(inode);
5960 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
5961 struct buffer_head *blk_bh = NULL;
5963 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
5964 ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh,
5965 ref_ci, ref_root_bh,
5966 dealloc);
5967 if (ret) {
5968 mlog_errno(ret);
5969 goto out;
5973 if (!di->i_xattr_loc)
5974 goto out;
5976 ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
5977 &blk_bh);
5978 if (ret < 0) {
5979 mlog_errno(ret);
5980 goto out;
5983 ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci,
5984 ref_root_bh, dealloc);
5985 if (ret)
5986 mlog_errno(ret);
5988 brelse(blk_bh);
5989 out:
5991 return ret;
5994 typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe);
5996 * Store the information we need in xattr reflink.
5997 * old_bh and new_bh are inode bh for the old and new inode.
5999 struct ocfs2_xattr_reflink {
6000 struct inode *old_inode;
6001 struct inode *new_inode;
6002 struct buffer_head *old_bh;
6003 struct buffer_head *new_bh;
6004 struct ocfs2_caching_info *ref_ci;
6005 struct buffer_head *ref_root_bh;
6006 struct ocfs2_cached_dealloc_ctxt *dealloc;
6007 should_xattr_reflinked *xattr_reflinked;
6011 * Given a xattr header and xe offset,
6012 * return the proper xv and the corresponding bh.
6013 * xattrs in the inode, block and xattr tree have different implementations.
6015 typedef int (get_xattr_value_root)(struct super_block *sb,
6016 struct buffer_head *bh,
6017 struct ocfs2_xattr_header *xh,
6018 int offset,
6019 struct ocfs2_xattr_value_root **xv,
6020 struct buffer_head **ret_bh,
6021 void *para);
6024 * Calculate all the xattr value root metadata stored in this xattr header and
6025 * the credits we need if we create them from scratch.
6026 * We use get_xattr_value_root so that all types of xattr containers can use it.
6028 static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
6029 struct buffer_head *bh,
6030 struct ocfs2_xattr_header *xh,
6031 int *metas, int *credits,
6032 int *num_recs,
6033 get_xattr_value_root *func,
6034 void *para)
6036 int i, ret = 0;
6037 struct ocfs2_xattr_value_root *xv;
6038 struct ocfs2_xattr_entry *xe;
6040 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
6041 xe = &xh->xh_entries[i];
6042 if (ocfs2_xattr_is_local(xe))
6043 continue;
6045 ret = func(sb, bh, xh, i, &xv, NULL, para);
6046 if (ret) {
6047 mlog_errno(ret);
6048 break;
6051 *metas += le16_to_cpu(xv->xr_list.l_tree_depth) *
6052 le16_to_cpu(xv->xr_list.l_next_free_rec);
6054 *credits += ocfs2_calc_extend_credits(sb,
6055 &def_xv.xv.xr_list,
6056 le32_to_cpu(xv->xr_clusters));
6059 * If the value is a tree with depth > 0, we don't descend into
6060 * the extent blocks, so just calculate a maximum record count.
6062 if (!xv->xr_list.l_tree_depth)
6063 *num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec);
6064 else
6065 *num_recs += ocfs2_clusters_for_bytes(sb,
6066 XATTR_SIZE_MAX);
6069 return ret;
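/*
 * Worked example of the accumulation above (made-up numbers): an
 * out-of-block value whose root has l_tree_depth = 1 and
 * l_next_free_rec = 2 adds 1 * 2 = 2 to *metas, adds the usual extend
 * credits for its xr_clusters, and -- because it is a tree -- counts
 * ocfs2_clusters_for_bytes(sb, XATTR_SIZE_MAX) records toward
 * *num_recs instead of walking its leaves.
 */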
6072 /* Used by xattr inode and block to return the right xv and buffer_head. */
6073 static int ocfs2_get_xattr_value_root(struct super_block *sb,
6074 struct buffer_head *bh,
6075 struct ocfs2_xattr_header *xh,
6076 int offset,
6077 struct ocfs2_xattr_value_root **xv,
6078 struct buffer_head **ret_bh,
6079 void *para)
6081 struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
6083 *xv = (struct ocfs2_xattr_value_root *)((void *)xh +
6084 le16_to_cpu(xe->xe_name_offset) +
6085 OCFS2_XATTR_SIZE(xe->xe_name_len));
6087 if (ret_bh)
6088 *ret_bh = bh;
6090 return 0;
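/*
 * Layout note for the pointer math above: in inode and xattr-block
 * storage xe_name_offset is relative to the ocfs2_xattr_header, and
 * the name is padded by OCFS2_XATTR_SIZE() (assuming its usual 4-byte
 * rounding), so the value root of a non-local entry starts right after
 * the padded name.  E.g. a 7-byte name at offset 3000 puts its
 * ocfs2_xattr_value_root at offset 3000 + 8 = 3008 in the same bh.
 */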
6094 * Reserve the meta_ac and calculate how many credits we need to reflink xattrs.
6095 * It is only used for inline xattrs and xattr blocks.
6097 static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,
6098 struct ocfs2_xattr_header *xh,
6099 struct buffer_head *ref_root_bh,
6100 int *credits,
6101 struct ocfs2_alloc_context **meta_ac)
6103 int ret, meta_add = 0, num_recs = 0;
6104 struct ocfs2_refcount_block *rb =
6105 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
6107 *credits = 0;
6109 ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh,
6110 &meta_add, credits, &num_recs,
6111 ocfs2_get_xattr_value_root,
6112 NULL);
6113 if (ret) {
6114 mlog_errno(ret);
6115 goto out;
6119 * We need to add/modify num_recs in refcount tree, so just calculate
6120 * an approximate number we need for refcount tree change.
6121 * Sometimes we need to split the tree, and after a split half of the
6122 * recs are moved to the new block, so a new block can only provide
6123 * half its capacity in new recs. So we multiply the number of new blocks by 2.
6125 num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2;
6126 meta_add += num_recs;
6127 *credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
6128 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
6129 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
6130 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
6131 else
6132 *credits += 1;
6134 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac);
6135 if (ret)
6136 mlog_errno(ret);
6138 out:
6139 return ret;
6143 * Given a xattr header, reflink all the xattrs in this container.
6144 * It can be used for inode, block and bucket.
6146 * NOTE:
6147 * Before we call this function, the caller must have already memcpy'd
6148 * the xattrs from old_xh to new_xh.
6150 * If args.xattr_reflinked is set, call it to decide whether the xe should
6151 * be reflinked or not. If not, remove it from the new xattr header.
6153 static int ocfs2_reflink_xattr_header(handle_t *handle,
6154 struct ocfs2_xattr_reflink *args,
6155 struct buffer_head *old_bh,
6156 struct ocfs2_xattr_header *xh,
6157 struct buffer_head *new_bh,
6158 struct ocfs2_xattr_header *new_xh,
6159 struct ocfs2_xattr_value_buf *vb,
6160 struct ocfs2_alloc_context *meta_ac,
6161 get_xattr_value_root *func,
6162 void *para)
6164 int ret = 0, i, j;
6165 struct super_block *sb = args->old_inode->i_sb;
6166 struct buffer_head *value_bh;
6167 struct ocfs2_xattr_entry *xe, *last;
6168 struct ocfs2_xattr_value_root *xv, *new_xv;
6169 struct ocfs2_extent_tree data_et;
6170 u32 clusters, cpos, p_cluster, num_clusters;
6171 unsigned int ext_flags = 0;
6173 mlog(0, "reflink xattr in container %llu, count = %u\n",
6174 (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count));
6176 last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
6177 for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
6178 xe = &xh->xh_entries[i];
6180 if (args->xattr_reflinked && !args->xattr_reflinked(xe)) {
6181 xe = &new_xh->xh_entries[j];
6183 le16_add_cpu(&new_xh->xh_count, -1);
6184 if (new_xh->xh_count) {
6185 memmove(xe, xe + 1,
6186 (void *)last - (void *)xe);
6187 memset(last, 0,
6188 sizeof(struct ocfs2_xattr_entry));
6192 * We don't want j to increase in the next round, since the
6193 * following entries have already been moved into this slot.
6195 j--;
6196 continue;
6199 if (ocfs2_xattr_is_local(xe))
6200 continue;
6202 ret = func(sb, old_bh, xh, i, &xv, NULL, para);
6203 if (ret) {
6204 mlog_errno(ret);
6205 break;
6208 ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para);
6209 if (ret) {
6210 mlog_errno(ret);
6211 break;
6215 * For an xattr which has l_tree_depth = 0, all the extent
6216 * recs have already been copied to the new xh with the
6217 * appropriate OCFS2_EXT_REFCOUNTED flag, so we just need to
6218 * increase the refcount in the refcount tree.
6220 * For an xattr which has l_tree_depth > 0, we need
6221 * to initialize it to the empty default value root,
6222 * and then insert the extents one by one.
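/*
 * In the l_tree_depth > 0 case below, copying def_xv gives new_xv a value
 * root with no extents; the loop that follows then re-inserts the old
 * extents one by one through ocfs2_insert_extent().
 */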
6224 if (xv->xr_list.l_tree_depth) {
6225 memcpy(new_xv, &def_xv, sizeof(def_xv));
6226 vb->vb_xv = new_xv;
6227 vb->vb_bh = value_bh;
6228 ocfs2_init_xattr_value_extent_tree(&data_et,
6229 INODE_CACHE(args->new_inode), vb);
6232 clusters = le32_to_cpu(xv->xr_clusters);
6233 cpos = 0;
6234 while (cpos < clusters) {
6235 ret = ocfs2_xattr_get_clusters(args->old_inode,
6236 cpos,
6237 &p_cluster,
6238 &num_clusters,
6239 &xv->xr_list,
6240 &ext_flags);
6241 if (ret) {
6242 mlog_errno(ret);
6243 goto out;
6246 BUG_ON(!p_cluster);
6248 if (xv->xr_list.l_tree_depth) {
6249 ret = ocfs2_insert_extent(handle,
6250 &data_et, cpos,
6251 ocfs2_clusters_to_blocks(
6252 args->old_inode->i_sb,
6253 p_cluster),
6254 num_clusters, ext_flags,
6255 meta_ac);
6256 if (ret) {
6257 mlog_errno(ret);
6258 goto out;
6262 ret = ocfs2_increase_refcount(handle, args->ref_ci,
6263 args->ref_root_bh,
6264 p_cluster, num_clusters,
6265 meta_ac, args->dealloc);
6266 if (ret) {
6267 mlog_errno(ret);
6268 goto out;
6271 cpos += num_clusters;
6275 out:
6276 return ret;
6279 static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
6281 int ret = 0, credits = 0;
6282 handle_t *handle;
6283 struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb);
6284 struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data;
6285 int inline_size = le16_to_cpu(di->i_xattr_inline_size);
6286 int header_off = osb->sb->s_blocksize - inline_size;
6287 struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)
6288 (args->old_bh->b_data + header_off);
6289 struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *)
6290 (args->new_bh->b_data + header_off);
6291 struct ocfs2_alloc_context *meta_ac = NULL;
6292 struct ocfs2_inode_info *new_oi;
6293 struct ocfs2_dinode *new_di;
6294 struct ocfs2_xattr_value_buf vb = {
6295 .vb_bh = args->new_bh,
6296 .vb_access = ocfs2_journal_access_di,
6299 ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
6300 &credits, &meta_ac);
6301 if (ret) {
6302 mlog_errno(ret);
6303 goto out;
6306 handle = ocfs2_start_trans(osb, credits);
6307 if (IS_ERR(handle)) {
6308 ret = PTR_ERR(handle);
6309 mlog_errno(ret);
6310 goto out;
6313 ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode),
6314 args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE);
6315 if (ret) {
6316 mlog_errno(ret);
6317 goto out_commit;
6320 memcpy(args->new_bh->b_data + header_off,
6321 args->old_bh->b_data + header_off, inline_size);
6323 new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
6324 new_di->i_xattr_inline_size = cpu_to_le16(inline_size);
6326 ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh,
6327 args->new_bh, new_xh, &vb, meta_ac,
6328 ocfs2_get_xattr_value_root, NULL);
6329 if (ret) {
6330 mlog_errno(ret);
6331 goto out_commit;
6334 new_oi = OCFS2_I(args->new_inode);
6335 spin_lock(&new_oi->ip_lock);
6336 new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
6337 new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
6338 spin_unlock(&new_oi->ip_lock);
6340 ocfs2_journal_dirty(handle, args->new_bh);
6342 out_commit:
6343 ocfs2_commit_trans(osb, handle);
6345 out:
6346 if (meta_ac)
6347 ocfs2_free_alloc_context(meta_ac);
6348 return ret;
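/*
 * Reserve one metadata block and, in its own transaction, create an empty
 * xattr block for the inode (indexed when 'indexed' is set). The new
 * block's buffer_head is returned through ret_bh.
 */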
6351 static int ocfs2_create_empty_xattr_block(struct inode *inode,
6352 struct buffer_head *fe_bh,
6353 struct buffer_head **ret_bh,
6354 int indexed)
6356 int ret;
6357 handle_t *handle;
6358 struct ocfs2_alloc_context *meta_ac;
6359 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
6361 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
6362 if (ret < 0) {
6363 mlog_errno(ret);
6364 return ret;
6367 handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
6368 if (IS_ERR(handle)) {
6369 ret = PTR_ERR(handle);
6370 mlog_errno(ret);
6371 goto out;
6374 mlog(0, "create new xattr block for inode %llu, index = %d\n",
6375 (unsigned long long)fe_bh->b_blocknr, indexed);
6376 ret = ocfs2_create_xattr_block(handle, inode, fe_bh,
6377 meta_ac, ret_bh, indexed);
6378 if (ret)
6379 mlog_errno(ret);
6381 ocfs2_commit_trans(osb, handle);
6382 out:
6383 ocfs2_free_alloc_context(meta_ac);
6384 return ret;
6387 static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args,
6388 struct buffer_head *blk_bh,
6389 struct buffer_head *new_blk_bh)
6391 int ret = 0, credits = 0;
6392 handle_t *handle;
6393 struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode);
6394 struct ocfs2_dinode *new_di;
6395 struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb);
6396 int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
6397 struct ocfs2_xattr_block *xb =
6398 (struct ocfs2_xattr_block *)blk_bh->b_data;
6399 struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
6400 struct ocfs2_xattr_block *new_xb =
6401 (struct ocfs2_xattr_block *)new_blk_bh->b_data;
6402 struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header;
6403 struct ocfs2_alloc_context *meta_ac;
6404 struct ocfs2_xattr_value_buf vb = {
6405 .vb_bh = new_blk_bh,
6406 .vb_access = ocfs2_journal_access_xb,
6409 ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
6410 &credits, &meta_ac);
6411 if (ret) {
6412 mlog_errno(ret);
6413 return ret;
6416 /* One more credit in case we need to set the xattr flags in the new inode. */
6417 handle = ocfs2_start_trans(osb, credits + 1);
6418 if (IS_ERR(handle)) {
6419 ret = PTR_ERR(handle);
6420 mlog_errno(ret);
6421 goto out;
6424 if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
6425 ret = ocfs2_journal_access_di(handle,
6426 INODE_CACHE(args->new_inode),
6427 args->new_bh,
6428 OCFS2_JOURNAL_ACCESS_WRITE);
6429 if (ret) {
6430 mlog_errno(ret);
6431 goto out_commit;
6435 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode),
6436 new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE);
6437 if (ret) {
6438 mlog_errno(ret);
6439 goto out_commit;
6442 memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off,
6443 osb->sb->s_blocksize - header_off);
6445 ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
6446 new_blk_bh, new_xh, &vb, meta_ac,
6447 ocfs2_get_xattr_value_root, NULL);
6448 if (ret) {
6449 mlog_errno(ret);
6450 goto out_commit;
6453 ocfs2_journal_dirty(handle, new_blk_bh);
6455 if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
6456 new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
6457 spin_lock(&new_oi->ip_lock);
6458 new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
6459 new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
6460 spin_unlock(&new_oi->ip_lock);
6462 ocfs2_journal_dirty(handle, args->new_bh);
6465 out_commit:
6466 ocfs2_commit_trans(osb, handle);
6468 out:
6469 ocfs2_free_alloc_context(meta_ac);
6470 return ret;
6473 struct ocfs2_reflink_xattr_tree_args {
6474 struct ocfs2_xattr_reflink *reflink;
6475 struct buffer_head *old_blk_bh;
6476 struct buffer_head *new_blk_bh;
6477 struct ocfs2_xattr_bucket *old_bucket;
6478 struct ocfs2_xattr_bucket *new_bucket;
6482 * NOTE:
6483 * This callback is invoked for both the old bucket and the new bucket,
6484 * and it has to return the right ret_bh for whichever bucket owns bh.
6485 * So the caller must pass in the correct bh.
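/*
 * This is the get_xattr_value_root callback that
 * ocfs2_reflink_xattr_buckets() hands to ocfs2_reflink_xattr_header(),
 * with the ocfs2_reflink_xattr_tree_args passed through as para.
 */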
6487 static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb,
6488 struct buffer_head *bh,
6489 struct ocfs2_xattr_header *xh,
6490 int offset,
6491 struct ocfs2_xattr_value_root **xv,
6492 struct buffer_head **ret_bh,
6493 void *para)
6495 struct ocfs2_reflink_xattr_tree_args *args =
6496 (struct ocfs2_reflink_xattr_tree_args *)para;
6497 struct ocfs2_xattr_bucket *bucket;
6499 if (bh == args->old_bucket->bu_bhs[0])
6500 bucket = args->old_bucket;
6501 else
6502 bucket = args->new_bucket;
6504 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
6505 xv, ret_bh);
6508 struct ocfs2_value_tree_metas {
6509 int num_metas;
6510 int credits;
6511 int num_recs;
6514 static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb,
6515 struct buffer_head *bh,
6516 struct ocfs2_xattr_header *xh,
6517 int offset,
6518 struct ocfs2_xattr_value_root **xv,
6519 struct buffer_head **ret_bh,
6520 void *para)
6522 struct ocfs2_xattr_bucket *bucket =
6523 (struct ocfs2_xattr_bucket *)para;
6525 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
6526 xv, ret_bh);
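/*
 * ocfs2_iterate_xattr_buckets() callback used by
 * ocfs2_lock_reflink_xattr_rec_allocators() below: it accumulates, into the
 * ocfs2_value_tree_metas passed via para, the metadata blocks, journal
 * credits and refcount recs needed to reflink every bucket in the extent.
 */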
6529 static int ocfs2_calc_value_tree_metas(struct inode *inode,
6530 struct ocfs2_xattr_bucket *bucket,
6531 void *para)
6533 struct ocfs2_value_tree_metas *metas =
6534 (struct ocfs2_value_tree_metas *)para;
6535 struct ocfs2_xattr_header *xh =
6536 (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
6538 /* Add the credits for this bucket first. */
6539 metas->credits += bucket->bu_blocks;
6540 return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
6541 xh, &metas->num_metas,
6542 &metas->credits, &metas->num_recs,
6543 ocfs2_value_tree_metas_in_bucket,
6544 bucket);
6548 * Given an xattr extent rec starting at blkno and covering len clusters,
6549 * iterate over all the buckets, calculate how much metadata we need for
6550 * reflinking all the ocfs2_xattr_value_roots, and lock the allocators accordingly.
6552 static int ocfs2_lock_reflink_xattr_rec_allocators(
6553 struct ocfs2_reflink_xattr_tree_args *args,
6554 struct ocfs2_extent_tree *xt_et,
6555 u64 blkno, u32 len, int *credits,
6556 struct ocfs2_alloc_context **meta_ac,
6557 struct ocfs2_alloc_context **data_ac)
6559 int ret, num_free_extents;
6560 struct ocfs2_value_tree_metas metas;
6561 struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb);
6562 struct ocfs2_refcount_block *rb;
6564 memset(&metas, 0, sizeof(metas));
6566 ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len,
6567 ocfs2_calc_value_tree_metas, &metas);
6568 if (ret) {
6569 mlog_errno(ret);
6570 goto out;
6573 *credits = metas.credits;
6576 * Calculate what we need for the refcount tree change.
6578 * We need to add/modify num_recs in the refcount tree, so just calculate
6579 * an approximate number we need for the refcount tree change.
6580 * Sometimes we need to split the tree, and after a split, half of the recs
6581 * will be moved to the new block, so a new block can only provide
6582 * half the number of recs. So we multiply the number of new blocks by 2.
6583 * In the end, we also have to add credits for modifying the already
6584 * existing refcount block.
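/*
 * Unlike the inline/block path above, this rounds num_recs up before
 * scaling, e.g. (hypothetically) 1 rec with 150 recs per refcount block
 * gives (1 + 149) / 150 * 2 = 2 new blocks rather than 0.
 */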
6586 rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
6587 metas.num_recs =
6588 (metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) /
6589 ocfs2_refcount_recs_per_rb(osb->sb) * 2;
6590 metas.num_metas += metas.num_recs;
6591 *credits += metas.num_recs +
6592 metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
6593 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
6594 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
6595 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
6596 else
6597 *credits += 1;
6599 /* count in the xattr tree change. */
6600 num_free_extents = ocfs2_num_free_extents(osb, xt_et);
6601 if (num_free_extents < 0) {
6602 ret = num_free_extents;
6603 mlog_errno(ret);
6604 goto out;
6607 if (num_free_extents < len)
6608 metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
6610 *credits += ocfs2_calc_extend_credits(osb->sb,
6611 xt_et->et_root_el, len);
6613 if (metas.num_metas) {
6614 ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
6615 meta_ac);
6616 if (ret) {
6617 mlog_errno(ret);
6618 goto out;
6622 if (len) {
6623 ret = ocfs2_reserve_clusters(osb, len, data_ac);
6624 if (ret)
6625 mlog_errno(ret);
6627 out:
6628 if (ret) {
6629 if (*meta_ac) {
6630 ocfs2_free_alloc_context(*meta_ac);
6631 *meta_ac = NULL;	/* clear the caller's pointer so it is not freed again */
6635 return ret;
6638 static int ocfs2_reflink_xattr_buckets(handle_t *handle,
6639 u64 blkno, u64 new_blkno, u32 clusters,
6640 struct ocfs2_alloc_context *meta_ac,
6641 struct ocfs2_alloc_context *data_ac,
6642 struct ocfs2_reflink_xattr_tree_args *args)
6644 int i, j, ret = 0;
6645 struct super_block *sb = args->reflink->old_inode->i_sb;
6646 u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
6647 u32 num_buckets = clusters * bpc;
6648 int bpb = args->old_bucket->bu_blocks;
6649 struct ocfs2_xattr_value_buf vb = {
6650 .vb_access = ocfs2_journal_access,
6653 for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) {
6654 ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
6655 if (ret) {
6656 mlog_errno(ret);
6657 break;
6660 ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno);
6661 if (ret) {
6662 mlog_errno(ret);
6663 break;
6667 * The real bucket num in this series of blocks is stored
6668 * in the 1st bucket.
6670 if (i == 0)
6671 num_buckets = le16_to_cpu(
6672 bucket_xh(args->old_bucket)->xh_num_buckets);
6674 ret = ocfs2_xattr_bucket_journal_access(handle,
6675 args->new_bucket,
6676 OCFS2_JOURNAL_ACCESS_CREATE);
6677 if (ret) {
6678 mlog_errno(ret);
6679 break;
6682 for (j = 0; j < bpb; j++)
6683 memcpy(bucket_block(args->new_bucket, j),
6684 bucket_block(args->old_bucket, j),
6685 sb->s_blocksize);
6687 ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
6689 ret = ocfs2_reflink_xattr_header(handle, args->reflink,
6690 args->old_bucket->bu_bhs[0],
6691 bucket_xh(args->old_bucket),
6692 args->new_bucket->bu_bhs[0],
6693 bucket_xh(args->new_bucket),
6694 &vb, meta_ac,
6695 ocfs2_get_reflink_xattr_value_root,
6696 args);
6697 if (ret) {
6698 mlog_errno(ret);
6699 break;
6703 * Re-access and dirty the bucket so that the metaecc is recalculated:
6704 * ocfs2_reflink_xattr_header() may have extended the transaction,
6705 * which drops the journal access we obtained on this bucket above.
6707 ret = ocfs2_xattr_bucket_journal_access(handle,
6708 args->new_bucket,
6709 OCFS2_JOURNAL_ACCESS_WRITE);
6710 if (ret) {
6711 mlog_errno(ret);
6712 break;
6715 ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
6716 ocfs2_xattr_bucket_relse(args->old_bucket);
6717 ocfs2_xattr_bucket_relse(args->new_bucket);
6720 ocfs2_xattr_bucket_relse(args->old_bucket);
6721 ocfs2_xattr_bucket_relse(args->new_bucket);
6722 return ret;
6725 * Create the same xattr extent record in the new inode's xattr tree.
6727 static int ocfs2_reflink_xattr_rec(struct inode *inode,
6728 struct buffer_head *root_bh,
6729 u64 blkno,
6730 u32 cpos,
6731 u32 len,
6732 void *para)
6734 int ret, credits = 0;
6735 u32 p_cluster, num_clusters;
6736 u64 new_blkno;
6737 handle_t *handle;
6738 struct ocfs2_reflink_xattr_tree_args *args =
6739 (struct ocfs2_reflink_xattr_tree_args *)para;
6740 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
6741 struct ocfs2_alloc_context *meta_ac = NULL;
6742 struct ocfs2_alloc_context *data_ac = NULL;
6743 struct ocfs2_extent_tree et;
6745 ocfs2_init_xattr_tree_extent_tree(&et,
6746 INODE_CACHE(args->reflink->new_inode),
6747 args->new_blk_bh);
6749 ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno,
6750 len, &credits,
6751 &meta_ac, &data_ac);
6752 if (ret) {
6753 mlog_errno(ret);
6754 goto out;
6757 handle = ocfs2_start_trans(osb, credits);
6758 if (IS_ERR(handle)) {
6759 ret = PTR_ERR(handle);
6760 mlog_errno(ret);
6761 goto out;
6764 ret = ocfs2_claim_clusters(osb, handle, data_ac,
6765 len, &p_cluster, &num_clusters);
6766 if (ret) {
6767 mlog_errno(ret);
6768 goto out_commit;
6771 new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);
6773 mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
6774 (unsigned long long)blkno, (unsigned long long)new_blkno, len);
6775 ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
6776 meta_ac, data_ac, args);
6777 if (ret) {
6778 mlog_errno(ret);
6779 goto out_commit;
6782 mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
6783 (unsigned long long)new_blkno, len, cpos);
6784 ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
6785 len, 0, meta_ac);
6786 if (ret)
6787 mlog_errno(ret);
6789 out_commit:
6790 ocfs2_commit_trans(osb, handle);
6792 out:
6793 if (meta_ac)
6794 ocfs2_free_alloc_context(meta_ac);
6795 if (data_ac)
6796 ocfs2_free_alloc_context(data_ac);
6797 return ret;
6801 * Create reflinked xattr buckets.
6802 * We add the buckets one by one, and take a refcount on every xattr value
6803 * in a bucket that is stored outside of it.
6805 static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args,
6806 struct buffer_head *blk_bh,
6807 struct buffer_head *new_blk_bh)
6809 int ret;
6810 struct ocfs2_reflink_xattr_tree_args para;
6812 memset(&para, 0, sizeof(para));
6813 para.reflink = args;
6814 para.old_blk_bh = blk_bh;
6815 para.new_blk_bh = new_blk_bh;
6817 para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode);
6818 if (!para.old_bucket) {
6819 mlog_errno(-ENOMEM);
6820 return -ENOMEM;
6823 para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode);
6824 if (!para.new_bucket) {
6825 ret = -ENOMEM;
6826 mlog_errno(ret);
6827 goto out;
6830 ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh,
6831 ocfs2_reflink_xattr_rec,
6832 &para);
6833 if (ret)
6834 mlog_errno(ret);
6836 out:
6837 ocfs2_xattr_bucket_free(para.old_bucket);
6838 ocfs2_xattr_bucket_free(para.new_bucket);
6839 return ret;
6842 static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args,
6843 struct buffer_head *blk_bh)
6845 int ret, indexed = 0;
6846 struct buffer_head *new_blk_bh = NULL;
6847 struct ocfs2_xattr_block *xb =
6848 (struct ocfs2_xattr_block *)blk_bh->b_data;
6851 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)
6852 indexed = 1;
6854 ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh,
6855 &new_blk_bh, indexed);
6856 if (ret) {
6857 mlog_errno(ret);
6858 goto out;
6861 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED))
6862 ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh);
6863 else
6864 ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh);
6865 if (ret)
6866 mlog_errno(ret);
6868 out:
6869 brelse(new_blk_bh);
6870 return ret;
6873 static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe)
6875 int type = ocfs2_xattr_get_type(xe);
6877 return type != OCFS2_XATTR_INDEX_SECURITY &&
6878 type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS &&
6879 type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
6882 int ocfs2_reflink_xattrs(struct inode *old_inode,
6883 struct buffer_head *old_bh,
6884 struct inode *new_inode,
6885 struct buffer_head *new_bh,
6886 bool preserve_security)
6888 int ret;
6889 struct ocfs2_xattr_reflink args;
6890 struct ocfs2_inode_info *oi = OCFS2_I(old_inode);
6891 struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data;
6892 struct buffer_head *blk_bh = NULL;
6893 struct ocfs2_cached_dealloc_ctxt dealloc;
6894 struct ocfs2_refcount_tree *ref_tree;
6895 struct buffer_head *ref_root_bh = NULL;
6897 ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb),
6898 le64_to_cpu(di->i_refcount_loc),
6899 1, &ref_tree, &ref_root_bh);
6900 if (ret) {
6901 mlog_errno(ret);
6902 goto out;
6905 ocfs2_init_dealloc_ctxt(&dealloc);
6907 args.old_inode = old_inode;
6908 args.new_inode = new_inode;
6909 args.old_bh = old_bh;
6910 args.new_bh = new_bh;
6911 args.ref_ci = &ref_tree->rf_ci;
6912 args.ref_root_bh = ref_root_bh;
6913 args.dealloc = &dealloc;
6914 if (preserve_security)
6915 args.xattr_reflinked = NULL;
6916 else
6917 args.xattr_reflinked = ocfs2_reflink_xattr_no_security;
6919 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
6920 ret = ocfs2_reflink_xattr_inline(&args);
6921 if (ret) {
6922 mlog_errno(ret);
6923 goto out_unlock;
6927 if (!di->i_xattr_loc)
6928 goto out_unlock;
6930 ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc),
6931 &blk_bh);
6932 if (ret < 0) {
6933 mlog_errno(ret);
6934 goto out_unlock;
6937 ret = ocfs2_reflink_xattr_in_block(&args, blk_bh);
6938 if (ret)
6939 mlog_errno(ret);
6941 brelse(blk_bh);
6943 out_unlock:
6944 ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb),
6945 ref_tree, 1);
6946 brelse(ref_root_bh);
6948 if (ocfs2_dealloc_has_cluster(&dealloc)) {
6949 ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1);
6950 ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc);
6953 out:
6954 return ret;
6958 * Initialize the security attribute and ACL for an already created inode.
6959 * Used when reflinking a file without preserving its security attributes.
6961 * It uses common APIs like ocfs2_xattr_set, so the caller
6962 * must not hold any lock except i_mutex.
6964 int ocfs2_init_security_and_acl(struct inode *dir,
6965 struct inode *inode)
6967 int ret = 0;
6968 struct buffer_head *dir_bh = NULL;
6969 struct ocfs2_security_xattr_info si = {
6970 .enable = 1,
6973 ret = ocfs2_init_security_get(inode, dir, &si);
6974 if (!ret) {
6975 ret = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
6976 si.name, si.value, si.value_len,
6977 XATTR_CREATE);
6978 if (ret) {
6979 mlog_errno(ret);
6980 goto leave;
6982 } else if (ret != -EOPNOTSUPP) {
6983 mlog_errno(ret);
6984 goto leave;
6987 ret = ocfs2_inode_lock(dir, &dir_bh, 0);
6988 if (ret) {
6989 mlog_errno(ret);
6990 goto leave;
6993 ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
6994 if (ret)
6995 mlog_errno(ret);
6997 ocfs2_inode_unlock(dir, 0);
6998 brelse(dir_bh);
6999 leave:
7000 return ret;
7003 * 'security' attributes support
7005 static size_t ocfs2_xattr_security_list(struct dentry *dentry, char *list,
7006 size_t list_size, const char *name,
7007 size_t name_len, int type)
7009 const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
7010 const size_t total_len = prefix_len + name_len + 1;
7012 if (list && total_len <= list_size) {
7013 memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
7014 memcpy(list + prefix_len, name, name_len);
7015 list[prefix_len + name_len] = '\0';
7017 return total_len;
7020 static int ocfs2_xattr_security_get(struct dentry *dentry, const char *name,
7021 void *buffer, size_t size, int type)
7023 if (strcmp(name, "") == 0)
7024 return -EINVAL;
7025 return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
7026 name, buffer, size);
7029 static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
7030 const void *value, size_t size, int flags, int type)
7032 if (strcmp(name, "") == 0)
7033 return -EINVAL;
7035 return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
7036 name, value, size, flags);
7039 int ocfs2_init_security_get(struct inode *inode,
7040 struct inode *dir,
7041 struct ocfs2_security_xattr_info *si)
7043 /* check whether ocfs2 supports the xattr feature */
7044 if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
7045 return -EOPNOTSUPP;
7046 return security_inode_init_security(inode, dir, &si->name, &si->value,
7047 &si->value_len);
7050 int ocfs2_init_security_set(handle_t *handle,
7051 struct inode *inode,
7052 struct buffer_head *di_bh,
7053 struct ocfs2_security_xattr_info *si,
7054 struct ocfs2_alloc_context *xattr_ac,
7055 struct ocfs2_alloc_context *data_ac)
7057 return ocfs2_xattr_set_handle(handle, inode, di_bh,
7058 OCFS2_XATTR_INDEX_SECURITY,
7059 si->name, si->value, si->value_len, 0,
7060 xattr_ac, data_ac);
7063 struct xattr_handler ocfs2_xattr_security_handler = {
7064 .prefix = XATTR_SECURITY_PREFIX,
7065 .list = ocfs2_xattr_security_list,
7066 .get = ocfs2_xattr_security_get,
7067 .set = ocfs2_xattr_security_set,
7071 * 'trusted' attributes support
7073 static size_t ocfs2_xattr_trusted_list(struct dentry *dentry, char *list,
7074 size_t list_size, const char *name,
7075 size_t name_len, int type)
7077 const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
7078 const size_t total_len = prefix_len + name_len + 1;
7080 if (list && total_len <= list_size) {
7081 memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
7082 memcpy(list + prefix_len, name, name_len);
7083 list[prefix_len + name_len] = '\0';
7085 return total_len;
7088 static int ocfs2_xattr_trusted_get(struct dentry *dentry, const char *name,
7089 void *buffer, size_t size, int type)
7091 if (strcmp(name, "") == 0)
7092 return -EINVAL;
7093 return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
7094 name, buffer, size);
7097 static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name,
7098 const void *value, size_t size, int flags, int type)
7100 if (strcmp(name, "") == 0)
7101 return -EINVAL;
7103 return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
7104 name, value, size, flags);
7107 struct xattr_handler ocfs2_xattr_trusted_handler = {
7108 .prefix = XATTR_TRUSTED_PREFIX,
7109 .list = ocfs2_xattr_trusted_list,
7110 .get = ocfs2_xattr_trusted_get,
7111 .set = ocfs2_xattr_trusted_set,
7115 * 'user' attributes support
7117 static size_t ocfs2_xattr_user_list(struct dentry *dentry, char *list,
7118 size_t list_size, const char *name,
7119 size_t name_len, int type)
7121 const size_t prefix_len = XATTR_USER_PREFIX_LEN;
7122 const size_t total_len = prefix_len + name_len + 1;
7123 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
7125 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7126 return 0;
7128 if (list && total_len <= list_size) {
7129 memcpy(list, XATTR_USER_PREFIX, prefix_len);
7130 memcpy(list + prefix_len, name, name_len);
7131 list[prefix_len + name_len] = '\0';
7133 return total_len;
7136 static int ocfs2_xattr_user_get(struct dentry *dentry, const char *name,
7137 void *buffer, size_t size, int type)
7139 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
7141 if (strcmp(name, "") == 0)
7142 return -EINVAL;
7143 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7144 return -EOPNOTSUPP;
7145 return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_USER, name,
7146 buffer, size);
7149 static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name,
7150 const void *value, size_t size, int flags, int type)
7152 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
7154 if (strcmp(name, "") == 0)
7155 return -EINVAL;
7156 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7157 return -EOPNOTSUPP;
7159 return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_USER,
7160 name, value, size, flags);
7163 struct xattr_handler ocfs2_xattr_user_handler = {
7164 .prefix = XATTR_USER_PREFIX,
7165 .list = ocfs2_xattr_user_list,
7166 .get = ocfs2_xattr_user_get,
7167 .set = ocfs2_xattr_user_set,
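/*
 * Usage illustration (not part of the original source): these handlers are
 * reached through the generic xattr syscalls, e.g.
 *
 *	setfattr -n user.comment -v "hello" /mnt/ocfs2/file
 *	getfattr -n user.comment /mnt/ocfs2/file
 *
 * and user.* access is refused with -EOPNOTSUPP while the filesystem is
 * mounted with OCFS2_MOUNT_NOUSERXATTR set.
 */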