/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>
/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 *
 * Returns: 1 if the EA should be stuffed
 */
static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
			unsigned int *size)
{
	*size = GFS2_EAREQ_SIZE_STUFFED(er);
	if (*size <= sdp->sd_jbsize)
		return 1;

	*size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

	return 0;
}
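/*
 * Editorial note (not in the original source): sd_jbsize is the usable
 * payload of one journaled block, i.e. the filesystem block size minus
 * sizeof(struct gfs2_meta_header).  With 4096-byte blocks almost every
 * name/value pair fits that budget and is stuffed; the unstuffed form
 * keeps only an array of data-block pointers in the EA record itself.
 */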
static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
	unsigned int size;

	if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
		return -ERANGE;

	ea_calc_size(sdp, er, &size);

	/* This can only happen with 512 byte blocks */
	if (size > sdp->sd_jbsize)
		return -ERANGE;

	return 0;
}
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);
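/*
 * Editorial note: as used by the iterators below, an ea_call_t callback
 * returns 0 to continue the scan, a positive value to stop early (e.g.
 * ea_find_i() returns 1 on a match), or a negative errno on failure;
 * ea_foreach_i() propagates any nonzero return to its caller.
 */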
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
struct ea_find {
	struct gfs2_ea_request *ef_er;
	struct gfs2_ea_location *ef_el;
};
static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_request *er = ef->ef_er;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == er->er_type) {
		if (ea->ea_name_len == er->er_name_len &&
		    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

	return 0;
}
int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		 struct gfs2_ea_location *el)
{
	struct ea_find ef;
	int error;

	ef.ef_er = er;
	ef.ef_el = el;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	error = ea_foreach(ip, ea_find_i, &ef);
	if (error > 0)
		return 0;

	return error;
}
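/*
 * Editorial note: a positive return from ea_foreach() means ea_find_i()
 * matched and filled in *el (as reconstructed above it takes a reference
 * on el->el_bh, which the caller must brelse); 0 means the attribute was
 * not found and *el remains zeroed.
 */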
/**
 * ea_dealloc_unstuffed -
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG.  But watch, this may not always
 * be true.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}
struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}
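/*
 * Editorial assumption: gfs2_ea_strlen() is taken to return
 * strlen(prefix) + ea_name_len + 1, so each entry is emitted as
 * "<prefix><name>\0" and ei_size always points at the next free byte
 * of er_data.
 */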
/**
 * gfs2_ea_list -
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	if (ip->i_eattr) {
		struct ea_list ei = { .ei_er = er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
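/*
 * Editorial note: this follows the usual listxattr() contract.  A caller
 * may pass no buffer (er_data_len == 0) to learn the required size, then
 * call again with a buffer at least that large; ea_list_i() returns
 * -ERANGE if the names no longer fit.
 */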
/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The data to be copied
 *
 * Returns: errno
 */
static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}
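/*
 * Editorial note: the reads above are issued in one pass with
 * gfs2_meta_read(..., 0, ...) so they can proceed in parallel, and a
 * second pass calls gfs2_meta_wait() on each buffer before copying.
 * That overlaps the I/O for large multi-block attribute values.
 */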
int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		     char *data)
{
	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
		return 0;
	}

	return ea_get_unstuffed(ip, el->el_ea, data);
}
/**
 * gfs2_ea_get_i -
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (er->er_data_len) {
		if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
			error = -ERANGE;
		else
			error = gfs2_ea_get_copy(ip, &el, er->er_data);
	}
	if (!error)
		error = GFS2_EA_DATA_LEN(el.el_ea);

	brelse(el.el_bh);

	return error;
}
/**
 * gfs2_ea_get -
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len ||
	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	unsigned int n = 1;
	u64 block;

	block = gfs2_alloc_block(ip, &n);
	gfs2_trans_add_unrevoke(sdp, block, 1);
	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	gfs2_add_inode_blocks(&ip->i_inode, 1);

	return 0;
}
/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);
			unsigned int n = 1;

			block = gfs2_alloc_block(ip, &n);
			gfs2_trans_add_unrevoke(sdp, block, 1);
			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			gfs2_add_inode_blocks(&ip->i_inode, 1);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
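/*
 * Editorial note: a stuffed EA keeps its value inline after the name, so
 * the whole record fits in GFS2_EAREQ_SIZE_STUFFED(er) bytes.  The
 * unstuffed form instead stores ea_num_ptrs block addresses after the
 * name, and each pointed-to GFS2_METATYPE_ED block carries up to
 * sd_jbsize bytes of the value.
 */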
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_lock_check(ip);
	if (error)
		goto out;

	al->al_requested = blks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + al->al_rgd->rd_length +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		if (er->er_flags & GFS2_ERF_MODE) {
			gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
					     (ip->i_inode.i_mode & S_IFMT) ==
					     (er->er_mode & S_IFMT));
			ip->i_inode.i_mode = er->er_mode;
		}
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}
static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}
/**
 * ea_init - initializes a new eattr block
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;

	if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
								ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}
struct ea_set {
	int ea_split;

	struct gfs2_ea_request *es_er;
	struct gfs2_ea_location *es_el;

	struct buffer_head *es_bh;
	struct gfs2_ea_header *es_ea;
};
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (er->er_flags & GFS2_ERF_MODE) {
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
			(ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
		ip->i_inode.i_mode = er->er_mode;
	}
	ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
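/*
 * Editorial note: as an ea_call_t callback, ea_set_simple() returns 0
 * when the current record cannot hold the request (keep scanning), 1
 * once the attribute has been written in place, or a negative errno;
 * ea_foreach() stops on any nonzero value.
 */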
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		u64 blk;
		unsigned int n = 1;

		blk = gfs2_alloc_block(ip, &n);
		gfs2_trans_add_unrevoke(sdp, blk, 1);
		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_eattr);
		ip->i_eattr = blk;
		ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
		gfs2_add_inode_blocks(&ip->i_inode, 1);

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}
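/*
 * Editorial note: when the single EA block fills up, the inode is
 * promoted to an indirect layout here: a GFS2_METATYPE_IN block of
 * __be64 pointers becomes the new i_eattr root, its first slot points
 * at the old EA block, and GFS2_DIF_EA_INDIRECT is set so readers such
 * as ea_foreach() know to walk the pointer block.
 */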
static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		    struct gfs2_ea_location *el)
{
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}
static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_eattr) {
		if (er->er_flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, er);
	}

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(er->er_flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, er, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
	} else {
		error = -ENODATA;
		if (!(er->er_flags & XATTR_REPLACE))
			error = ea_set_i(ip, er, NULL);
	}

	return error;
}
int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}
	error = ea_check_size(GFS2_SB(&ip->i_inode), er);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else
		ea->ea_type = GFS2_EATYPE_UNUSED;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (GFS2_EA_IS_STUFFED(el.el_ea))
		error = ea_remove_stuffed(ip, &el);
	else
		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
					    0);

	brelse(el.el_bh);

	return error;
}
/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */
int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;

fail:
	gfs2_trans_end(sdp);
	kfree(bh);
	return error;
}
int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		      struct iattr *attr, char *data)
{
	struct buffer_head *dibh;
	int error;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
		if (error)
			return error;

		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
		memcpy(GFS2_EA2DATA(el->el_ea), data,
		       GFS2_EA_DATA_LEN(el->el_ea));
	} else {
		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);
		if (error)
			return error;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, indbh, 1);

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}
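/*
 * Editorial note: deallocation makes two passes over the same pointer
 * block.  The first pass only collects the resource groups holding the
 * EA blocks (gfs2_rlist_add) so every RG glock can be taken up front
 * with gfs2_glock_nq_m(); the second pass, inside the transaction,
 * actually frees the extents and zeroes the pointers.
 */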
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	int error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_eattr, 1);

	ip->i_eattr = 0;
	gfs2_add_inode_blocks(&ip->i_inode, -1);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	return error;
}
/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
	if (error)
		goto out_rindex;

	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
		error = ea_dealloc_indirect(ip);
		if (error)
			goto out_rindex;
	}

	error = ea_dealloc_block(ip);

out_rindex:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}