fs/gfs2/eattr.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the filesystem superblock
 * @er: the EA request
 * @size: returns the size the request occupies in the EA block
 *
 * Returns: 1 if the EA should be stuffed
 */
static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
			unsigned int *size)
{
	*size = GFS2_EAREQ_SIZE_STUFFED(er);
	if (*size <= sdp->sd_jbsize)
		return 1;

	*size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

	return 0;
}

static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
	unsigned int size;

	if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
		return -ERANGE;

	ea_calc_size(sdp, er, &size);

	/* This can only happen with 512 byte blocks */
	if (size > sdp->sd_jbsize)
		return -ERANGE;

	return 0;
}
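
/*
 * Callback used by ea_foreach()/ea_foreach_i().  Returning nonzero stops the
 * scan; callers treat a positive return as success and a negative return as
 * an errno.
 */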

typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
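
/*
 * ea_foreach - call @ea_call on every EA record of @ip, handling both the
 * direct case (di_eattr points at a single EA block) and the indirect case,
 * where di_eattr points at a block of pointers to EA blocks.
 */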

static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}

struct ea_find {
	struct gfs2_ea_request *ef_er;
	struct gfs2_ea_location *ef_el;
};
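
/* ea_foreach() callback: stop (return 1) when the type and name match. */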

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_request *er = ef->ef_er;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == er->er_type) {
		if (ea->ea_name_len == er->er_name_len &&
		    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

	return 0;
}

int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		 struct gfs2_ea_location *el)
{
	struct ea_find ef;
	int error;

	ef.ef_er = er;
	ef.ef_el = el;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	error = ea_foreach(ip, ea_find_i, &ef);
	if (error > 0)
		return 0;

	return error;
}

/**
 * ea_dealloc_unstuffed - free the data blocks pointed to by an unstuffed EA
 * @ip: the inode
 * @bh: the buffer holding the EA block
 * @ea: the EA record
 * @prev: the previous EA record in the block
 * @private: when non-NULL, the record is not coalesced with @prev
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG.  But watch, this may not always
 * be true.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
		gfs2_set_inode_blocks(&ip->i_inode);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
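
/*
 * Wrapper around ea_dealloc_unstuffed() that takes the quota and rindex
 * locks it needs.  When @leave is set, the EA header is left in place
 * (marked unused) instead of being merged into the previous record.
 */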

static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}

struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};
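
/*
 * ea_foreach() callback for gfs2_ea_list(): append "<prefix><name>\0" to the
 * request buffer, or just total up the sizes when no buffer was supplied.
 */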

static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		BUG_ON(l == 0);

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}

/**
 * gfs2_ea_list - build the list of extended attribute names for an inode
 * @ip: the inode
 * @er: the request holding the listxattr buffer
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	if (ip->i_di.di_eattr) {
		struct ea_list ei = { .ei_er = er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The data to be copied
 *
 * Returns: errno
 */
static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}

int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		     char *data)
{
	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
		return 0;
	} else
		return ea_get_unstuffed(ip, el->el_ea, data);
}

/**
 * gfs2_ea_get_i - read the value of an extended attribute into the request
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (er->er_data_len) {
		if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
			error = -ERANGE;
		else
			error = gfs2_ea_get_copy(ip, &el, er->er_data);
	}
	if (!error)
		error = GFS2_EA_DATA_LEN(el.el_ea);

	brelse(el.el_bh);

	return error;
}

/**
 * gfs2_ea_get - read an extended attribute, taking the inode glock
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len ||
	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	u64 block;

	block = gfs2_alloc_meta(ip);

	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	ip->i_di.di_blocks++;
	gfs2_set_inode_blocks(&ip->i_inode);

	return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);

			block = gfs2_alloc_meta(ip);

			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			ip->i_di.di_blocks++;
			gfs2_set_inode_blocks(&ip->i_inode);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);
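
/*
 * Common setup for EA writes that allocate blocks: check quota, reserve
 * @blks blocks, open a transaction, and then call @skeleton_call to do the
 * actual work.
 */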

static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
	if (error)
		goto out_gunlock_q;

	al->al_requested = blks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + al->al_rgd->rd_length +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		if (er->er_flags & GFS2_ERF_MODE) {
			gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
					     (ip->i_inode.i_mode & S_IFMT) ==
					     (er->er_mode & S_IFMT));
			ip->i_inode.i_mode = er->er_mode;
		}
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_di.di_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip: the inode
 * @er: the EA request
 *
 * Returns: errno
 */
static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;

	if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}
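
/*
 * Split the free tail of an EA record off into a new record.  The original
 * record is trimmed to GFS2_EA_SIZE(ea) and the new record takes over the
 * remaining space (and the LAST flag, if set).
 */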

static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
								ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}
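
/*
 * Remove the old stuffed copy of an attribute after it has been rewritten
 * elsewhere: either merge its record into the previous one or mark it unused.
 */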

static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
	int ea_split;

	struct gfs2_ea_request *es_er;
	struct gfs2_ea_location *es_el;

	struct buffer_head *es_bh;
	struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (er->er_flags & GFS2_ERF_MODE) {
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
			(ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
		ip->i_inode.i_mode = er->er_mode;
	}
	ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}
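
/*
 * ea_foreach() callback for ea_set_i(): try to satisfy the set request inside
 * the current block, either by reusing an unused record or by splitting the
 * slack space off an existing one.  Returns 1 if the request was handled,
 * 0 to keep scanning, or a negative errno.
 */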

static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
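
/*
 * No existing EA block had room: allocate a fresh EA block for the request,
 * converting the inode to the indirect EA layout first if necessary.
 */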

static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		u64 blk;

		blk = gfs2_alloc_meta(ip);

		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_di.di_eattr);
		ip->i_di.di_eattr = blk;
		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
		ip->i_di.di_blocks++;
		gfs2_set_inode_blocks(&ip->i_inode);

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}

static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		    struct gfs2_ea_location *el)
{
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
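
/*
 * gfs2_ea_set_i - set (create or replace) an extended attribute.
 * XATTR_CREATE fails with -EEXIST if the attribute already exists;
 * XATTR_REPLACE fails with -ENODATA if it does not.
 */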

int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr) {
		if (er->er_flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, er);
	}

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(er->er_flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, er, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
	} else {
		error = -ENODATA;
		if (!(er->er_flags & XATTR_REPLACE))
			error = ea_set_i(ip, er, NULL);
	}

	return error;
}

int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}
	error = ea_check_size(GFS2_SB(&ip->i_inode), er);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
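
/*
 * Remove a stuffed EA: merge its record into the previous one (or mark it
 * unused if it is the first record) and update the dinode, all within one
 * small transaction.
 */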

static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else
		ea->ea_type = GFS2_EATYPE_UNUSED;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}

int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (GFS2_EA_IS_STUFFED(el.el_ea))
		error = ea_remove_stuffed(ip, &el);
	else
		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
					    0);

	brelse(el.el_bh);

	return error;
}

/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */
int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
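
/*
 * Rewrite the data blocks of an unstuffed EA in place with @data (used when
 * a chmod updates a cached ACL).  On success the transaction opened here is
 * ended by the caller, gfs2_ea_acl_chmod(); on failure it is ended here.
 */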

static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;

fail:
	gfs2_trans_end(sdp);
	kfree(bh);
	return error;
}

int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		      struct iattr *attr, char *data)
{
	struct buffer_head *dibh;
	int error;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
		if (error)
			return error;

		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
		memcpy(GFS2_EA2DATA(el->el_ea), data,
		       GFS2_EA_DATA_LEN(el->el_ea));
	} else
		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
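
/*
 * Free every EA block referenced from the indirect block: first walk the
 * pointers to build the resource-group list, then walk them again inside a
 * transaction to free the blocks and clear the pointers.
 */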

static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, indbh, 1);

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
		gfs2_set_inode_blocks(&ip->i_inode);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}
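
/*
 * Free the top-level EA block itself (direct or indirect) and clear
 * di_eattr on the dinode.
 */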

static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	int error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);

	ip->i_di.di_eattr = 0;
	if (!ip->i_di.di_blocks)
		gfs2_consist_inode(ip);
	ip->i_di.di_blocks--;
	gfs2_set_inode_blocks(&ip->i_inode);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
	if (error)
		goto out_rindex;

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		error = ea_dealloc_indirect(ip);
		if (error)
			goto out_rindex;
	}

	error = ea_dealloc_block(ip);

out_rindex:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}