[GFS2] Change all types to uX style
fs/gfs2/eattr.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp:
 * @er:
 * @size:
 *
 * Returns: 1 if the EA should be stuffed
 */

static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
                        unsigned int *size)
{
        *size = GFS2_EAREQ_SIZE_STUFFED(er);
        if (*size <= sdp->sd_jbsize)
                return 1;

        *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

        return 0;
}
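
/*
 * Illustrative example (editor's sketch, assuming sd_jbsize is the block
 * size less sizeof(struct gfs2_meta_header), e.g. roughly 4KiB on a
 * 4KiB-block filesystem): a request whose stuffed size (header + name +
 * inline data) fits within one journaled block is stored inline ("stuffed",
 * return 1); anything larger is sized for data-block pointers instead
 * ("unstuffed", return 0).
 */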

static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
        unsigned int size;

        if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
                return -ERANGE;

        ea_calc_size(sdp, er, &size);

        /* This can only happen with 512 byte blocks */
        if (size > sdp->sd_jbsize)
                return -ERANGE;

        return 0;
}

typedef int (*ea_call_t) (struct gfs2_inode *ip,
                          struct buffer_head *bh,
                          struct gfs2_ea_header *ea,
                          struct gfs2_ea_header *prev,
                          void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
                        ea_call_t ea_call, void *data)
{
        struct gfs2_ea_header *ea, *prev = NULL;
        int error = 0;

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
                return -EIO;

        for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
                if (!GFS2_EA_REC_LEN(ea))
                        goto fail;
                if (!(bh->b_data <= (char *)ea &&
                      (char *)GFS2_EA2NEXT(ea) <= bh->b_data + bh->b_size))
                        goto fail;
                if (!GFS2_EATYPE_VALID(ea->ea_type))
                        goto fail;

                error = ea_call(ip, bh, ea, prev, data);
                if (error)
                        return error;

                if (GFS2_EA_IS_LAST(ea)) {
                        if ((char *)GFS2_EA2NEXT(ea) !=
                            bh->b_data + bh->b_size)
                                goto fail;
                        break;
                }
        }

        return error;

fail:
        gfs2_consist_inode(ip);
        return -EIO;
}

static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
        struct buffer_head *bh, *eabh;
        u64 *eablk, *end;
        int error;

        error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
                               DIO_START | DIO_WAIT, &bh);
        if (error)
                return error;

        if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
                error = ea_foreach_i(ip, bh, ea_call, data);
                goto out;
        }

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (u64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                error = gfs2_meta_read(ip->i_gl, bn, DIO_START | DIO_WAIT,
                                       &eabh);
                if (error)
                        break;
                error = ea_foreach_i(ip, eabh, ea_call, data);
                brelse(eabh);
                if (error)
                        break;
        }
out:
        brelse(bh);
        return error;
}
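
/*
 * Layout sketch (derived from the walk above, not additional on-disk
 * documentation): di_eattr points either directly at a single
 * GFS2_METATYPE_EA block holding a chain of gfs2_ea_header records, or,
 * when GFS2_DIF_EA_INDIRECT is set, at a GFS2_METATYPE_IN block holding up
 * to sd_inptrs big-endian block pointers, each naming one EA block.
 * ea_foreach() walks whichever form is in use and applies ea_call to every
 * record.
 */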

struct ea_find {
        struct gfs2_ea_request *ef_er;
        struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_find *ef = private;
        struct gfs2_ea_request *er = ef->ef_er;

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (ea->ea_type == er->er_type) {
                if (ea->ea_name_len == er->er_name_len &&
                    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
                        struct gfs2_ea_location *el = ef->ef_el;
                        get_bh(bh);
                        el->el_bh = bh;
                        el->el_ea = ea;
                        el->el_prev = prev;
                        return 1;
                }
        }

#if 0
        else if ((ip->i_di.di_flags & GFS2_DIF_EA_PACKED) &&
                 er->er_type == GFS2_EATYPE_SYS)
                return 1;
#endif

        return 0;
}

int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                 struct gfs2_ea_location *el)
{
        struct ea_find ef;
        int error;

        ef.ef_er = er;
        ef.ef_el = el;

        memset(el, 0, sizeof(struct gfs2_ea_location));

        error = ea_foreach(ip, ea_find_i, &ef);
        if (error > 0)
                return 0;

        return error;
}
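
/*
 * Usage sketch (mirrors what gfs2_ea_get_i() below actually does; no new
 * API is implied): a caller fills in er_type, er_name and er_name_len, then
 *
 *      struct gfs2_ea_location el;
 *      int error = gfs2_ea_find(ip, er, &el);
 *      if (error)
 *              return error;
 *      if (!el.el_ea)
 *              return -ENODATA;
 *      ... use el.el_ea ...
 *      brelse(el.el_bh);
 *
 * On a hit, el.el_bh carries an extra buffer reference (from get_bh() in
 * ea_find_i()) that the caller must release.
 */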

/**
 * ea_dealloc_unstuffed -
 * @ip:
 * @bh:
 * @ea:
 * @prev:
 * @private:
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                                struct gfs2_ea_header *ea,
                                struct gfs2_ea_header *prev, void *private)
{
        int *leave = private;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder rg_gh;
        struct buffer_head *dibh;
        u64 *dataptrs, bn = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        if (GFS2_EA_IS_STUFFED(ea))
                return 0;

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++)
                if (*dataptrs) {
                        blks++;
                        bn = be64_to_cpu(*dataptrs);
                }
        if (!blks)
                return 0;

        rgd = gfs2_blk2rgrpd(sdp, bn);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length + RES_DINODE +
                                 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (!*dataptrs)
                        break;
                bn = be64_to_cpu(*dataptrs);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *dataptrs = 0;
                if (!ip->i_di.di_blocks)
                        gfs2_consist_inode(ip);
                ip->i_di.di_blocks--;
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        if (prev && !leave) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                ea->ea_num_ptrs = 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                ip->i_di.di_ctime = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&rg_gh);
        return error;
}

static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                               struct gfs2_ea_header *ea,
                               struct gfs2_ea_header *prev, int leave)
{
        struct gfs2_alloc *al;
        int error;

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
        if (error)
                goto out_quota;

        error = ea_dealloc_unstuffed(ip, bh, ea, prev,
                                     (leave) ? &error : NULL);

        gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
        gfs2_quota_unhold(ip);
out_alloc:
        gfs2_alloc_put(ip);
        return error;
}

struct ea_list {
        struct gfs2_ea_request *ei_er;
        unsigned int ei_size;
};

static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_list *ei = private;
        struct gfs2_ea_request *er = ei->ei_er;
        unsigned int ea_size = gfs2_ea_strlen(ea);

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (er->er_data_len) {
                char *prefix = NULL;
                unsigned int l = 0;
                char c = 0;

                if (ei->ei_size + ea_size > er->er_data_len)
                        return -ERANGE;

                switch (ea->ea_type) {
                case GFS2_EATYPE_USR:
                        prefix = "user.";
                        l = 5;
                        break;
                case GFS2_EATYPE_SYS:
                        prefix = "system.";
                        l = 7;
                        break;
                case GFS2_EATYPE_SECURITY:
                        prefix = "security.";
                        l = 9;
                        break;
                }

                BUG_ON(l == 0);

                memcpy(er->er_data + ei->ei_size, prefix, l);
                memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
                       ea->ea_name_len);
                memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
        }

        ei->ei_size += ea_size;

        return 0;
}

/**
 * gfs2_ea_list -
 * @ip:
 * @er:
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                   &i_gh);
        if (error)
                return error;

        if (ip->i_di.di_eattr) {
                struct ea_list ei = { .ei_er = er, .ei_size = 0 };

                error = ea_foreach(ip, ea_list_i, &ei);
                if (!error)
                        error = ei.ei_size;
        }

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip:
 * @ea:
 * @data:
 *
 * Returns: errno
 */

static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                            char *data)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error = 0;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
        if (!bh)
                return -ENOMEM;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
                                       DIO_START, bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto out;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto out;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto out;
                }

                memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
                       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

                amount -= sdp->sd_jbsize;
                data += sdp->sd_jbsize;

                brelse(bh[x]);
        }

out:
        kfree(bh);
        return error;
}

int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                     char *data)
{
        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                memcpy(data, GFS2_EA2DATA(el->el_ea),
                       GFS2_EA_DATA_LEN(el->el_ea));
                return 0;
        } else
                return ea_get_unstuffed(ip, el->el_ea, data);
}

/**
 * gfs2_ea_get_i -
 * @ip:
 * @er:
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_di.di_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (er->er_data_len) {
                if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
                        error = -ERANGE;
                else
                        error = gfs2_ea_get_copy(ip, &el, er->er_data);
        }
        if (!error)
                error = GFS2_EA_DATA_LEN(el.el_ea);

        brelse(el.el_bh);

        return error;
}

/**
 * gfs2_ea_get -
 * @ip:
 * @er:
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;
        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                   &i_gh);
        if (error)
                return error;

        error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp:
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_ea_header *ea;
        u64 block;

        block = gfs2_alloc_meta(ip);

        *bhp = gfs2_meta_new(ip->i_gl, block);
        gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
        gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
        gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

        ea = GFS2_EA_BH2FIRST(*bhp);
        ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
        ea->ea_type = GFS2_EATYPE_UNUSED;
        ea->ea_flags = GFS2_EAFLAG_LAST;
        ea->ea_num_ptrs = 0;

        ip->i_di.di_blocks++;

        return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                    struct gfs2_ea_request *er)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        ea->ea_data_len = cpu_to_be32(er->er_data_len);
        ea->ea_name_len = er->er_name_len;
        ea->ea_type = er->er_type;
        ea->__pad = 0;

        memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

        if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
                ea->ea_num_ptrs = 0;
                memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
        } else {
                u64 *dataptr = GFS2_EA2DATAPTRS(ea);
                const char *data = er->er_data;
                unsigned int data_len = er->er_data_len;
                unsigned int copy;
                unsigned int x;

                ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len,
                                               sdp->sd_jbsize);
                for (x = 0; x < ea->ea_num_ptrs; x++) {
                        struct buffer_head *bh;
                        u64 block;
                        int mh_size = sizeof(struct gfs2_meta_header);

                        block = gfs2_alloc_meta(ip);

                        bh = gfs2_meta_new(ip->i_gl, block);
                        gfs2_trans_add_bh(ip->i_gl, bh, 1);
                        gfs2_metatype_set(bh, GFS2_METATYPE_ED,
                                          GFS2_FORMAT_ED);

                        ip->i_di.di_blocks++;

                        copy = (data_len > sdp->sd_jbsize) ? sdp->sd_jbsize :
                                                             data_len;
                        memcpy(bh->b_data + mh_size, data, copy);
                        if (copy < sdp->sd_jbsize)
                                memset(bh->b_data + mh_size + copy, 0,
                                       sdp->sd_jbsize - copy);

                        *dataptr++ = cpu_to_be64((u64)bh->b_blocknr);
                        data += copy;
                        data_len -= copy;

                        brelse(bh);
                }

                gfs2_assert_withdraw(sdp, !data_len);
        }

        return 0;
}
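
/*
 * Example of the unstuffed path above (illustrative numbers): a 16KiB value
 * needs DIV_ROUND_UP(16384, sd_jbsize) pointer slots; each allocated data
 * block carries a gfs2_meta_header followed by up to sd_jbsize bytes of the
 * value, and the final partial block is zero-padded before its block number
 * is recorded in the EA's pointer array.
 */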

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
                                   struct gfs2_ea_request *er,
                                   void *private);

static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                             unsigned int blks,
                             ea_skeleton_call_t skeleton_call, void *private)
{
        struct gfs2_alloc *al;
        struct buffer_head *dibh;
        int error;

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out;

        error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
        if (error)
                goto out_gunlock_q;

        al->al_requested = blks;

        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_gunlock_q;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
                                 blks + al->al_rgd->rd_ri.ri_length +
                                 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;

        error = skeleton_call(ip, er, private);
        if (error)
                goto out_end_trans;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                if (er->er_flags & GFS2_ERF_MODE) {
                        gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                             (ip->i_di.di_mode & S_IFMT) ==
                                             (er->er_mode & S_IFMT));
                        ip->i_di.di_mode = er->er_mode;
                }
                ip->i_di.di_ctime = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

out_end_trans:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
        gfs2_inplace_release(ip);
out_gunlock_q:
        gfs2_quota_unlock(ip);
out:
        gfs2_alloc_put(ip);
        return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                     void *private)
{
        struct buffer_head *bh;
        int error;

        error = ea_alloc_blk(ip, &bh);
        if (error)
                return error;

        ip->i_di.di_eattr = bh->b_blocknr;
        error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

        brelse(bh);

        return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip:
 * @er:
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
        unsigned int blks = 1;

        if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
                blks += DIV_ROUND_UP(er->er_data_len, jbsize);

        return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}

static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
        u32 ea_size = GFS2_EA_SIZE(ea);
        struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
                                                               ea_size);
        u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
        int last = ea->ea_flags & GFS2_EAFLAG_LAST;

        ea->ea_rec_len = cpu_to_be32(ea_size);
        ea->ea_flags ^= last;

        new->ea_rec_len = cpu_to_be32(new_size);
        new->ea_flags = last;

        return new;
}
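
/*
 * Worked example (illustrative numbers only): if a record's rec_len is 256
 * bytes but its header, name and stuffed data need only GFS2_EA_SIZE() == 96,
 * the split trims the record to 96 bytes and carves a new 160-byte record
 * out of the slack immediately after it. If the GFS2_EAFLAG_LAST bit was
 * set, it migrates to the new trailing record.
 */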

static void ea_set_remove_stuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        u32 len;

        gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

        if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                return;
        } else if (GFS2_EA2NEXT(prev) != ea) {
                prev = GFS2_EA2NEXT(prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                     GFS2_EA2NEXT(prev) == ea);
        }

        len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
        prev->ea_rec_len = cpu_to_be32(len);

        if (GFS2_EA_IS_LAST(ea))
                prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
        int ea_split;

        struct gfs2_ea_request *es_er;
        struct gfs2_ea_location *es_el;

        struct buffer_head *es_bh;
        struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
                                 struct gfs2_ea_header *ea, struct ea_set *es)
{
        struct gfs2_ea_request *er = es->es_er;
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
                                 RES_DINODE + 2 * RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        ea_write(ip, ea, er);

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto out;

        if (er->er_flags & GFS2_ERF_MODE) {
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                     (ip->i_di.di_mode & S_IFMT) ==
                                     (er->er_mode & S_IFMT));
                ip->i_di.di_mode = er->er_mode;
        }
        ip->i_di.di_ctime = get_seconds();
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(&ip->i_di, dibh->b_data);
        brelse(dibh);
out:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
        return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
                               struct gfs2_ea_request *er, void *private)
{
        struct ea_set *es = private;
        struct gfs2_ea_header *ea = es->es_ea;
        int error;

        gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        error = ea_write(ip, ea, er);
        if (error)
                return error;

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        return 0;
}

static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
                         struct gfs2_ea_header *ea,
                         struct gfs2_ea_header *prev, void *private)
{
        struct ea_set *es = private;
        unsigned int size;
        int stuffed;
        int error;

        stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

        if (ea->ea_type == GFS2_EATYPE_UNUSED) {
                if (GFS2_EA_REC_LEN(ea) < size)
                        return 0;
                if (!GFS2_EA_IS_STUFFED(ea)) {
                        error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
                        if (error)
                                return error;
                }
                es->ea_split = 0;
        } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
                es->ea_split = 1;
        else
                return 0;

        if (stuffed) {
                error = ea_set_simple_noalloc(ip, bh, ea, es);
                if (error)
                        return error;
        } else {
                unsigned int blks;

                es->es_bh = bh;
                es->es_ea = ea;
                blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
                                        GFS2_SB(&ip->i_inode)->sd_jbsize);

                error = ea_alloc_skeleton(ip, es->es_er, blks,
                                          ea_set_simple_alloc, es);
                if (error)
                        return error;
        }

        return 1;
}

static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                        void *private)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *indbh, *newbh;
        u64 *eablk;
        int error;
        int mh_size = sizeof(struct gfs2_meta_header);

        if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
                u64 *end;

                error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
                                       DIO_START | DIO_WAIT, &indbh);
                if (error)
                        return error;

                if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                        error = -EIO;
                        goto out;
                }

                eablk = (u64 *)(indbh->b_data + mh_size);
                end = eablk + sdp->sd_inptrs;

                for (; eablk < end; eablk++)
                        if (!*eablk)
                                break;

                if (eablk == end) {
                        error = -ENOSPC;
                        goto out;
                }

                gfs2_trans_add_bh(ip->i_gl, indbh, 1);
        } else {
                u64 blk;

                blk = gfs2_alloc_meta(ip);

                indbh = gfs2_meta_new(ip->i_gl, blk);
                gfs2_trans_add_bh(ip->i_gl, indbh, 1);
                gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(indbh, mh_size);

                eablk = (u64 *)(indbh->b_data + mh_size);
                *eablk = cpu_to_be64(ip->i_di.di_eattr);
                ip->i_di.di_eattr = blk;
                ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
                ip->i_di.di_blocks++;

                eablk++;
        }

        error = ea_alloc_blk(ip, &newbh);
        if (error)
                goto out;

        *eablk = cpu_to_be64((u64)newbh->b_blocknr);
        error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
        brelse(newbh);
        if (error)
                goto out;

        if (private)
                ea_set_remove_stuffed(ip, (struct gfs2_ea_location *)private);

out:
        brelse(indbh);
        return error;
}

static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                    struct gfs2_ea_location *el)
{
        struct ea_set es;
        unsigned int blks = 2;
        int error;

        memset(&es, 0, sizeof(struct ea_set));
        es.es_er = er;
        es.es_el = el;

        error = ea_foreach(ip, ea_set_simple, &es);
        if (error > 0)
                return 0;
        if (error)
                return error;

        if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
                blks++;
        if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
                blks += DIV_ROUND_UP(er->er_data_len,
                                     GFS2_SB(&ip->i_inode)->sd_jbsize);

        return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
                                   struct gfs2_ea_location *el)
{
        if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
                el->el_prev = GFS2_EA2NEXT(el->el_prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
        }

        return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_di.di_eattr) {
                if (er->er_flags & XATTR_REPLACE)
                        return -ENODATA;
                return ea_init(ip, er);
        }

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;

        if (el.el_ea) {
                if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
                        brelse(el.el_bh);
                        return -EPERM;
                }

                error = -EEXIST;
                if (!(er->er_flags & XATTR_CREATE)) {
                        int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
                        error = ea_set_i(ip, er, &el);
                        if (!error && unstuffed)
                                ea_set_remove_unstuffed(ip, &el);
                }

                brelse(el.el_bh);
        } else {
                error = -ENODATA;
                if (!(er->er_flags & XATTR_REPLACE))
                        error = ea_set_i(ip, er, NULL);
        }

        return error;
}
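
/*
 * The XATTR_CREATE/XATTR_REPLACE handling above, summarized:
 *
 *      attribute exists, XATTR_CREATE set    -> -EEXIST
 *      attribute exists, XATTR_CREATE clear  -> replaced in place
 *      attribute absent, XATTR_REPLACE set   -> -ENODATA
 *      attribute absent, XATTR_REPLACE clear -> created
 *
 * which matches the setxattr(2) contract.
 */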

int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;
        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }
        error = ea_check_size(GFS2_SB(&ip->i_inode), er);
        if (error)
                return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        if (IS_IMMUTABLE(&ip->i_inode))
                error = -EPERM;
        else
                error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
                                 RES_DINODE + RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

        if (prev) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else
                ea->ea_type = GFS2_EATYPE_UNUSED;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                ip->i_di.di_ctime = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(GFS2_SB(&ip->i_inode));

        return error;
}

int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_di.di_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (GFS2_EA_IS_STUFFED(el.el_ea))
                error = ea_remove_stuffed(ip, &el);
        else
                error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
                                            0);

        brelse(el.el_bh);

        return error;
}

/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */

int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
                error = -EPERM;
        else
                error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_header *ea, char *data)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
        if (!bh)
                return -ENOMEM;

        error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
        if (error)
                goto out;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
                                       DIO_START, bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto fail;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto fail;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto fail;
                }

                gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

                memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
                       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

                amount -= sdp->sd_jbsize;
                data += sdp->sd_jbsize;

                brelse(bh[x]);
        }

out:
        kfree(bh);
        return error;

fail:
        gfs2_trans_end(sdp);
        kfree(bh);
        return error;
}

int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                      struct iattr *attr, char *data)
{
        struct buffer_head *dibh;
        int error;

        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
                                         RES_DINODE + RES_EATTR, 0);
                if (error)
                        return error;

                gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
                memcpy(GFS2_EA2DATA(el->el_ea), data,
                       GFS2_EA_DATA_LEN(el->el_ea));
        } else
                error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

        if (error)
                return error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                error = inode_setattr(&ip->i_inode, attr);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
                gfs2_inode_attr_out(ip);
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(GFS2_SB(&ip->i_inode));

        return error;
}

static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrp_list rlist;
        struct buffer_head *indbh, *dibh;
        u64 *eablk, *end;
        unsigned int rg_blocks = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

        error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
                               DIO_START | DIO_WAIT, &indbh);
        if (error)
                return error;

        if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + sdp->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_rlist_add(sdp, &rlist, bstart);
                        bstart = bn;
                        blen = 1;
                }
                blks++;
        }
        if (bstart)
                gfs2_rlist_add(sdp, &rlist, bstart);
        else
                goto out;

        gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

        for (x = 0; x < rlist.rl_rgrps; x++) {
                struct gfs2_rgrpd *rgd;
                rgd = rlist.rl_ghs[x].gh_gl->gl_object;
                rg_blocks += rgd->rd_ri.ri_length;
        }

        error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
        if (error)
                goto out_rlist_free;

        error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
                                 RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_bh(ip->i_gl, indbh, 1);

        eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        bstart = 0;
        blen = 0;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *eablk = 0;
                if (!ip->i_di.di_blocks)
                        gfs2_consist_inode(ip);
                ip->i_di.di_blocks--;
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
        gfs2_rlist_free(&rlist);
out:
        brelse(indbh);
        return error;
}

static int ea_dealloc_block(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_rgrpd *rgd;
        struct buffer_head *dibh;
        int error;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
                                   &al->al_rgd_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
                                 RES_QUOTA, 1);
        if (error)
                goto out_gunlock;

        gfs2_free_meta(ip, ip->i_di.di_eattr, 1);

        ip->i_di.di_eattr = 0;
        if (!ip->i_di.di_blocks)
                gfs2_consist_inode(ip);
        ip->i_di.di_blocks--;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&al->al_rgd_gh);
        return error;
}
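
/*
 * Overall teardown order, as implemented by gfs2_ea_dealloc() below: first
 * every unstuffed data block is freed via ea_foreach(), then the indirect
 * pointer block if GFS2_DIF_EA_INDIRECT is set, and finally the EA block
 * itself, at which point di_eattr is cleared.
 */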

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al;
        int error;

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
        if (error)
                goto out_quota;

        error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
        if (error)
                goto out_rindex;

        if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
                error = ea_dealloc_indirect(ip);
                if (error)
                        goto out_rindex;
        }

        error = ea_dealloc_block(ip);

out_rindex:
        gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
        gfs2_quota_unhold(ip);
out_alloc:
        gfs2_alloc_put(ip);
        return error;
}