fs/gfs2/eattr.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the superblock
 * @er: the request
 * @size: returns the on-disk size of the request
 *
 * Returns: 1 if the EA should be stuffed
 */

static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
			unsigned int *size)
{
	*size = GFS2_EAREQ_SIZE_STUFFED(er);
	if (*size <= sdp->sd_jbsize)
		return 1;

	*size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

	return 0;
}
static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
	unsigned int size;

	if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
		return -ERANGE;

	ea_calc_size(sdp, er, &size);

	/* This can only happen with 512 byte blocks */
	if (size > sdp->sd_jbsize)
		return -ERANGE;

	return 0;
}
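
/*
 * Why only 512 byte blocks: sd_jbsize is the block size less the
 * metadata header, so with 512 byte blocks the __be64 pointer table
 * of a maximally sized unstuffed EA can itself overflow a single
 * journaled block, which is the case ea_calc_size() reports through
 * *size above.
 */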
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
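
/*
 * Iteration contract used throughout this file: an ea_call_t callback
 * returns 0 to continue the walk, a negative errno to abort, or a
 * positive value to stop early; ea_foreach() passes that positive
 * value back to its caller (see gfs2_ea_find(), which maps it to
 * "found").
 */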
struct ea_find {
	struct gfs2_ea_request *ef_er;
	struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_request *er = ef->ef_er;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == er->er_type) {
		if (ea->ea_name_len == er->er_name_len &&
		    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

	return 0;
}

int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		 struct gfs2_ea_location *el)
{
	struct ea_find ef;
	int error;

	ef.ef_er = er;
	ef.ef_el = el;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	error = ea_foreach(ip, ea_find_i, &ef);
	if (error > 0)
		return 0;

	return error;
}
/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode
 * @bh: the buffer containing the EA block
 * @ea: the EA header being deallocated
 * @prev: the previous EA header in the block, if any
 * @private: if non-NULL, the record is marked unused rather than
 *           being merged into @prev
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG.  But watch, this may not always
 * be true.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
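
/*
 * Note the two-pass structure above: the first loop over the pointer
 * table only counts the in-use blocks (to size the transaction) and
 * records a block number for the rgrp lookup; the second loop does the
 * actual freeing, coalescing runs of consecutive blocks into single
 * gfs2_free_meta() calls via bstart/blen.
 */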
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}
struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};

static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		BUG_ON(l == 0);

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}
/**
 * gfs2_ea_list - list the extended attributes of an inode
 * @ip: the inode
 * @er: the request structure; if er_data_len is zero, only the
 *      required buffer size is computed
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	if (ip->i_di.di_eattr) {
		struct ea_list ei = { .ei_er = er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The data to be copied
 *
 * Returns: errno
 */

static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}
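
/*
 * The reads above are batched: gfs2_meta_read() is called without
 * DIO_WAIT for every data block first, and only then does the second
 * loop wait on each buffer with gfs2_meta_wait(), so the block reads
 * can be in flight concurrently.
 */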
int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		     char *data)
{
	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
		return 0;
	} else
		return ea_get_unstuffed(ip, el->el_ea, data);
}
/**
 * gfs2_ea_get_i - looks up an EA and copies its value to the request buffer
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (er->er_data_len) {
		if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
			error = -ERANGE;
		else
			error = gfs2_ea_get_copy(ip, &el, er->er_data);
	}
	if (!error)
		error = GFS2_EA_DATA_LEN(el.el_ea);

	brelse(el.el_bh);

	return error;
}
/**
 * gfs2_ea_get - reads an extended attribute, taking the inode glock
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len ||
	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	unsigned int n = 1;
	u64 block;

	block = gfs2_alloc_block(ip, &n);
	gfs2_trans_add_unrevoke(sdp, block, 1);
	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	gfs2_add_inode_blocks(&ip->i_inode, 1);

	return 0;
}
/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);
			unsigned int n = 1;

			block = gfs2_alloc_block(ip, &n);
			gfs2_trans_add_unrevoke(sdp, block, 1);
			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			gfs2_add_inode_blocks(&ip->i_inode, 1);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
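
/*
 * ea_write() assumes its caller has already started a transaction and
 * reserved enough blocks for the data: callers that allocate go
 * through ea_alloc_skeleton() below, while ea_set_simple_noalloc()
 * only uses it for the stuffed case, where no data blocks are needed.
 */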
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);

static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_lock_check(ip);
	if (error)
		goto out;

	al->al_requested = blks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + al->al_rgd->rd_length +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		if (er->er_flags & GFS2_ERF_MODE) {
			gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
					     (ip->i_inode.i_mode & S_IFMT) ==
					     (er->er_mode & S_IFMT));
			ip->i_inode.i_mode = er->er_mode;
		}
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}
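
/*
 * ea_alloc_skeleton() is the common setup/teardown for every EA write
 * that allocates blocks: quota lock, in-place reservation, transaction,
 * then the per-caller skeleton_call, with everything unwound in reverse
 * order on the way out.  The dinode is written back (with an optional
 * mode update via GFS2_ERF_MODE) inside the same transaction.
 */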
static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_di.di_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}
/**
 * ea_init - initializes a new eattr block
 * @ip: the inode
 * @er: the request to write into the new block
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;

	if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
								ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}
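
/*
 * ea_split_ea() trims an existing record to its actual size and carves
 * a second header out of the slack at its tail; the GFS2_EAFLAG_LAST
 * bit migrates to the new record.  Both callers immediately rewrite
 * the new header via ea_write().
 */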
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}
struct ea_set {
	int ea_split;

	struct gfs2_ea_request *es_er;
	struct gfs2_ea_location *es_el;

	struct buffer_head *es_bh;
	struct gfs2_ea_header *es_ea;
};
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (er->er_flags & GFS2_ERF_MODE) {
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
		ip->i_inode.i_mode = er->er_mode;
	}
	ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
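
/*
 * ea_set_simple() is the ea_foreach() callback used by ea_set_i(): it
 * returns 0 until it finds a slot the request fits in (an unused
 * record that is big enough, or the slack at the end of a live record,
 * which ea_split marks), then performs the write and returns 1 to
 * stop the walk.
 */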
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		u64 blk;
		unsigned int n = 1;
		blk = gfs2_alloc_block(ip, &n);
		gfs2_trans_add_unrevoke(sdp, blk, 1);
		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_di.di_eattr);
		ip->i_di.di_eattr = blk;
		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
		gfs2_add_inode_blocks(&ip->i_inode, 1);

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}
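
/*
 * ea_set_block() handles the overflow case: when no existing EA block
 * has room, it finds a free slot in the indirect block (or, on first
 * overflow, allocates the indirect block and moves the old di_eattr
 * block into its first slot), then chains in a freshly allocated EA
 * block and writes the request there.
 */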
static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		    struct gfs2_ea_location *el)
{
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}
static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr) {
		if (er->er_flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, er);
	}

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(er->er_flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, er, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
	} else {
		error = -ENODATA;
		if (!(er->er_flags & XATTR_REPLACE))
			error = ea_set_i(ip, er, NULL);
	}

	return error;
}
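
/*
 * The flag handling above matches setxattr(2) semantics: XATTR_CREATE
 * on an existing attribute fails with -EEXIST, and XATTR_REPLACE on a
 * missing one fails with -ENODATA.  When an existing unstuffed value
 * is replaced, its old data blocks are freed afterwards by
 * ea_set_remove_unstuffed().
 */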
int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}
	error = ea_check_size(GFS2_SB(&ip->i_inode), er);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else
		ea->ea_type = GFS2_EATYPE_UNUSED;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (GFS2_EA_IS_STUFFED(el.el_ea))
		error = ea_remove_stuffed(ip, &el);
	else
		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
					    0);

	brelse(el.el_bh);

	return error;
}
/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */
int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;

fail:
	gfs2_trans_end(sdp);
	kfree(bh);
	return error;
}
int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		      struct iattr *attr, char *data)
{
	struct buffer_head *dibh;
	int error;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
		if (error)
			return error;

		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
		memcpy(GFS2_EA2DATA(el->el_ea), data,
		       GFS2_EA_DATA_LEN(el->el_ea));
	} else
		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, indbh, 1);

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}
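
/*
 * Like ea_dealloc_unstuffed(), this walks the pointer block twice:
 * the first pass only collects the affected resource groups (via
 * gfs2_rlist_add()) and counts blocks so that all rgrp glocks can be
 * acquired and the transaction sized before the second pass frees the
 * EA blocks in coalesced runs.
 */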
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	int error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);

	ip->i_di.di_eattr = 0;
	gfs2_add_inode_blocks(&ip->i_inode, -1);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	return error;
}
/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
	if (error)
		goto out_rindex;

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		error = ea_dealloc_indirect(ip);
		if (error)
			goto out_rindex;
	}

	error = ea_dealloc_block(ip);

out_rindex:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}