/*
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@dcs.ed.ac.uk), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/locks.h>
#include <linux/smp_lock.h>
#include <linux/sched.h>
#include <linux/highuid.h>

static int ext2_update_inode(struct inode * inode, int do_sync);

/*
 * Called at each iput()
 */
void ext2_put_inode (struct inode * inode)
{
	ext2_discard_prealloc (inode);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext2_delete_inode (struct inode * inode)
{
	lock_kernel();

	if (is_bad_inode(inode) ||
	    inode->i_ino == EXT2_ACL_IDX_INO ||
	    inode->i_ino == EXT2_ACL_DATA_INO)
		goto no_delete;
	inode->u.ext2_i.i_dtime = CURRENT_TIME;
	mark_inode_dirty(inode);
	ext2_update_inode(inode, IS_SYNC(inode));
	inode->i_size = 0;
	if (inode->i_blocks)
		ext2_truncate (inode);
	ext2_free_inode (inode);

	unlock_kernel();
	return;
no_delete:
	unlock_kernel();
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

void ext2_discard_prealloc (struct inode * inode)
{
#ifdef EXT2_PREALLOCATE
	lock_kernel();
	/* Writer: ->i_prealloc* */
	if (inode->u.ext2_i.i_prealloc_count) {
		unsigned short total = inode->u.ext2_i.i_prealloc_count;
		unsigned long block = inode->u.ext2_i.i_prealloc_block;
		inode->u.ext2_i.i_prealloc_count = 0;
		inode->u.ext2_i.i_prealloc_block = 0;
		/* Writer: end */
		ext2_free_blocks (inode, block, total);
	}
	unlock_kernel();
#endif
}

static int ext2_alloc_block (struct inode * inode, unsigned long goal, int *err)
{
#ifdef EXT2FS_DEBUG
	static unsigned long alloc_hits = 0, alloc_attempts = 0;
#endif
	unsigned long result;

#ifdef EXT2_PREALLOCATE
	/* Writer: ->i_prealloc* */
	if (inode->u.ext2_i.i_prealloc_count &&
	    (goal == inode->u.ext2_i.i_prealloc_block ||
	     goal + 1 == inode->u.ext2_i.i_prealloc_block))
	{
		result = inode->u.ext2_i.i_prealloc_block++;
		inode->u.ext2_i.i_prealloc_count--;
		/* Writer: end */
#ifdef EXT2FS_DEBUG
		ext2_debug ("preallocation hit (%lu/%lu).\n",
			    ++alloc_hits, ++alloc_attempts);
#endif
	} else {
		ext2_discard_prealloc (inode);
#ifdef EXT2FS_DEBUG
		ext2_debug ("preallocation miss (%lu/%lu).\n",
			    alloc_hits, ++alloc_attempts);
#endif
		if (S_ISREG(inode->i_mode))
			result = ext2_new_block (inode, goal,
				 &inode->u.ext2_i.i_prealloc_count,
				 &inode->u.ext2_i.i_prealloc_block, err);
		else
			result = ext2_new_block (inode, goal, 0, 0, err);
	}
#else
	result = ext2_new_block (inode, goal, 0, 0, err);
#endif
	return result;
}

typedef struct {
	u32	*p;
	u32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
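
/*
 * A sketch of the protocol, spelled out for the reader: walkers of the
 * indirect chain take no locks; they snapshot each pointer with
 * add_chain() and later call verify_chain() to check that every key
 * they copied still matches the pointer it was read from.  Writers
 * change the pointers only under the big kernel lock, so a reader that
 * loses the race gets -EAGAIN from ext2_get_branch() and simply retries
 * (see the reread: loop in ext2_get_block() below).
 */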

/**
 *	ext2_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *
 *	To store the locations of file's data ext2 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at the leaves and indirect blocks in the
 *	intermediate nodes. This function translates the block number
 *	into a path in that tree - the return value is the path length and
 *	@offsets[n] is the offset of the pointer to the (n+1)th node in the
 *	nth one. If @i_block is out of range (negative or too large), a
 *	warning is printed and zero is returned.
 *
 *	Note: this function doesn't find the node addresses, so no IO is
 *	needed. All we need to know is the capacity of indirect blocks
 *	(taken from inode->i_sb).
 *
 *	Portability note: the last comparison (the check that we fit into the
 *	triple indirect block) is spelled differently, because otherwise on an
 *	architecture with 32-bit longs and 8Kb pages we might get into trouble
 *	if our filesystem had 8Kb blocks. We might use long long, but that
 *	would kill us on x86. Oh well, at least the sign propagation does not
 *	matter - i_block would have to be negative in the very beginning, so
 *	we would not get there at all.
 */
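
/*
 * Worked example, assuming 1KiB blocks so that EXT2_ADDR_PER_BLOCK == 256
 * and EXT2_NDIR_BLOCKS == 12:
 *
 *	i_block = 5   -> offsets = { 5 }          (depth 1, direct)
 *	i_block = 20  -> offsets = { 12, 8 }      (depth 2, indirect)
 *	i_block = 300 -> offsets = { 13, 0, 32 }  (depth 3, double indirect)
 *
 * The last case follows from 300 - 12 - 256 == 32, 32 >> 8 == 0 and
 * 32 & 255 == 32; offsets 12 and 13 are EXT2_IND_BLOCK and
 * EXT2_DIND_BLOCK respectively.
 */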

static int ext2_block_to_path(struct inode *inode, long i_block, int offsets[4])
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	if (i_block < 0) {
		ext2_warning (inode->i_sb, "ext2_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ext2_warning (inode->i_sb, "ext2_block_to_path", "block > big");
	}
	return n;
}

/**
 *	ext2_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, the addresses they were taken from (and where
 *	we can verify that the chain did not change) and the buffer_heads
 *	hosting these numbers.
 *
 *	Function stops when it stumbles upon a zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0),
 *	when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO),
 *	when it notices that the chain had been changed while it was reading
 *		(ditto, *@err == -EAGAIN),
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *@err == 0).
 */

static inline Indirect *ext2_get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[4],
					int *err)
{
	kdev_t dev = inode->i_dev;
	int size = inode->i_sb->s_blocksize;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, inode->u.ext2_i.i_data + *offsets);
	if (!p->key)
		goto no_block;
	/*
	 * switch below is merely an unrolled loop - body should be
	 * repeated depth-1 times. Maybe loop would be actually better,
	 * but that way we get straight execution path in normal cases.
	 * Easy to change, anyway - all cases in switch are literally
	 * identical.
	 */
	switch (depth) {
		case 4:
			bh = bread(dev, le32_to_cpu(p->key), size);
			if (!bh)
				goto failure;
			/* Reader: pointers */
			if (!verify_chain(chain, p))
				goto changed;
			add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
			/* Reader: end */
			if (!p->key)
				goto no_block;
		case 3:
			bh = bread(dev, le32_to_cpu(p->key), size);
			if (!bh)
				goto failure;
			/* Reader: pointers */
			if (!verify_chain(chain, p))
				goto changed;
			add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
			/* Reader: end */
			if (!p->key)
				goto no_block;
		case 2:
			bh = bread(dev, le32_to_cpu(p->key), size);
			if (!bh)
				goto failure;
			/* Reader: pointers */
			if (!verify_chain(chain, p))
				goto changed;
			add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
			/* Reader: end */
			if (!p->key)
				goto no_block;
	}
	return NULL;

changed:
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext2_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same cylinder group.
 *	Caller must make sure that @ind is valid and will stay that way.
 */

static inline unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
{
	u32 *start = ind->bh ? (u32*) ind->bh->b_data : inode->u.ext2_i.i_data;
	u32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself? OK, just
	 * put it into the same cylinder group then.
	 */
	return (inode->u.ext2_i.i_block_group *
		EXT2_BLOCKS_PER_GROUP(inode->i_sb)) +
	       le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_first_data_block);
}
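
/*
 * Worked example (hypothetical numbers): suppose we are placing the
 * pointer at index 7 of an indirect block whose slots 0-5 hold blocks
 * 1000-1005 and slot 6 is empty.  The backward scan skips the empty
 * slot 6, finds 1005 in slot 5 and returns it as the goal.  Were the
 * whole block empty, we would return the block number of the indirect
 * block itself, keeping the data next to its metadata.
 */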

/**
 *	ext2_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@chain: chain of indirect blocks
 *	@partial: pointer to the last triple within a chain
 *	@goal: place to store the result.
 *
 *	Normally this function finds the preferred place for block allocation,
 *	stores it in *@goal and returns zero. If the branch had been changed
 *	under us we return -EAGAIN.
 */

static inline int ext2_find_goal(struct inode *inode,
				 long block,
				 Indirect chain[4],
				 Indirect *partial,
				 unsigned long *goal)
{
	/* Writer: ->i_next_alloc* */
	if (block == inode->u.ext2_i.i_next_alloc_block + 1) {
		inode->u.ext2_i.i_next_alloc_block++;
		inode->u.ext2_i.i_next_alloc_goal++;
	}
	/* Writer: end */
	/* Reader: pointers, ->i_next_alloc* */
	if (verify_chain(chain, partial)) {
		/*
		 * try the heuristic for sequential allocation,
		 * failing that at least try to get decent locality.
		 */
		if (block == inode->u.ext2_i.i_next_alloc_block)
			*goal = inode->u.ext2_i.i_next_alloc_goal;
		if (!*goal)
			*goal = ext2_find_near(inode, partial);
		return 0;
	}
	/* Reader: end */
	return -EAGAIN;
}
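
/*
 * Note on the sequential heuristic: an append-only writer that has just
 * mapped logical block N to physical block P leaves i_next_alloc_block
 * == N and i_next_alloc_goal == P (see ext2_splice_branch()).  When
 * block N + 1 is allocated, the writer section above advances both, the
 * reader section then matches block == i_next_alloc_block, and the goal
 * becomes P + 1 - so the file stays physically contiguous whenever the
 * allocator can honour the goal.
 */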

/**
 *	ext2_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@num: depth of the chain (number of blocks to allocate)
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates @num blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext2_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon exit we have the same
 *	picture as after the successful ext2_get_block(), except that in one
 *	place the chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext2_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */

static int ext2_alloc_branch(struct inode *inode,
			     int num,
			     unsigned long goal,
			     int *offsets,
			     Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0;
	int err;
	int i;
	int parent = ext2_alloc_block(inode, goal, &err);

	branch[0].key = cpu_to_le32(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = ext2_alloc_block(inode, parent, &err);
		if (!nr)
			break;
		branch[n].key = cpu_to_le32(nr);
		/*
		 * Get buffer_head for parent block, zero it out and set
		 * the pointer to new one, then send parent to disk.
		 */
		bh = getblk(inode->i_dev, parent, blocksize);
		if (!buffer_uptodate(bh))
			wait_on_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].bh = bh;
		branch[n].p = (u32*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		mark_buffer_uptodate(bh, 1);
		mark_buffer_dirty(bh, 1);
		if (IS_SYNC(inode) || inode->u.ext2_i.i_osync) {
			ll_rw_block (WRITE, 1, &bh);
			wait_on_buffer (bh);
		}
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		ext2_free_blocks(inode, le32_to_cpu(branch[i].key), 1);
	return err;
}

/**
 *	ext2_splice_branch - splice the allocated branch onto inode.
 *	@inode: owner
 *	@block: (logical) number of block we are adding
 *	@chain: chain of indirect blocks (with a missing link - see
 *		ext2_alloc_branch)
 *	@where: location of missing link
 *	@num: number of blocks we are adding
 *
 *	This function verifies that chain (up to the missing link) had not
 *	changed, fills the missing link and does all housekeeping needed in
 *	inode (->i_blocks, etc.). In case of success we end up with the full
 *	chain to new block and return 0. Otherwise (== chain had been changed)
 *	we free the new blocks (forgetting their buffer_heads, indeed) and
 *	return -EAGAIN.
 */

static inline int ext2_splice_branch(struct inode *inode,
				     long block,
				     Indirect chain[4],
				     Indirect *where,
				     int num)
{
	int i;

	/* Verify that place we are splicing to is still there and vacant */

	/* Writer: pointers, ->i_next_alloc*, ->i_blocks */
	if (!verify_chain(chain, where-1) || *where->p)
		/* Writer: end */
		goto changed;

	/* That's it */

	*where->p = where->key;
	inode->u.ext2_i.i_next_alloc_block = block;
	inode->u.ext2_i.i_next_alloc_goal = le32_to_cpu(where[num-1].key);
	inode->i_blocks += num * inode->i_sb->s_blocksize/512;

	/* Writer: end */

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME;

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		mark_buffer_dirty(where->bh, 1);
		if (IS_SYNC(inode) || inode->u.ext2_i.i_osync) {
			ll_rw_block (WRITE, 1, &where->bh);
			wait_on_buffer(where->bh);
		}
	}

	if (IS_SYNC(inode) || inode->u.ext2_i.i_osync)
		ext2_sync_inode (inode);
	else
		mark_inode_dirty(inode);
	return 0;

changed:
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		ext2_free_blocks(inode, le32_to_cpu(where[i].key), 1);
	return -EAGAIN;
}
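
/*
 * Unit note: i_blocks counts 512-byte sectors, not filesystem blocks,
 * which is why the accounting above scales by s_blocksize/512.  For
 * example, splicing a branch of num == 3 blocks on a 1KiB-block
 * filesystem adds 3 * 1024/512 == 6 to i_blocks.
 */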

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 */

static int ext2_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	unsigned long goal;
	int left;
	int depth = ext2_block_to_path(inode, iblock, offsets);

	if (depth == 0)
		goto out;

	lock_kernel();
reread:
	partial = ext2_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		bh_result->b_dev = inode->i_dev;
		bh_result->b_blocknr = le32_to_cpu(chain[depth-1].key);
		bh_result->b_state |= (1UL << BH_Mapped);
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		unlock_kernel();
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	if (ext2_find_goal(inode, iblock, chain, partial, &goal) < 0)
		goto changed;

	left = (chain + depth) - partial;
	err = ext2_alloc_branch(inode, left, goal,
				offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (ext2_splice_branch(inode, iblock, chain, partial, left) < 0)
		goto changed;

	bh_result->b_state |= (1UL << BH_New);
	goto got_it;

changed:
	while (partial > chain) {
		bforget(partial->bh);
		partial--;
	}
	goto reread;
}
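
/*
 * ext2_getblk() below drives ext2_get_block() with an on-stack dummy
 * buffer_head purely to learn the physical block number; only when the
 * mapping succeeds does it take a real buffer via getblk().  A freshly
 * allocated block (BH_New) is zeroed and marked dirty before it is
 * returned, so stale on-disk contents never reach the caller.
 */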

struct buffer_head * ext2_getblk(struct inode * inode, long block, int create, int * err)
{
	struct buffer_head dummy;
	int error;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	error = ext2_get_block(inode, block, &dummy, create);
	*err = error;
	if (!error && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
		if (buffer_new(&dummy)) {
			if (!buffer_uptodate(bh))
				wait_on_buffer(bh);
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			mark_buffer_uptodate(bh, 1);
			mark_buffer_dirty(bh, 1);
		}
		return bh;
	}
	return NULL;
}

struct buffer_head * ext2_bread (struct inode * inode, int block,
				 int create, int *err)
{
	struct buffer_head * bh;
	int prev_blocks;

	prev_blocks = inode->i_blocks;

	bh = ext2_getblk (inode, block, create, err);
	if (!bh)
		return bh;

	/*
	 * If the inode has grown, and this is a directory, then perform
	 * preallocation of a few more blocks to try to keep directory
	 * fragmentation down.
	 */
	if (create &&
	    S_ISDIR(inode->i_mode) &&
	    inode->i_blocks > prev_blocks &&
	    EXT2_HAS_COMPAT_FEATURE(inode->i_sb,
				    EXT2_FEATURE_COMPAT_DIR_PREALLOC)) {
		int i;
		struct buffer_head *tmp_bh;

		for (i = 1;
		     i < EXT2_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
		     i++) {
			/*
			 * ext2_getblk will zero out the contents of the
			 * directory for us
			 */
			tmp_bh = ext2_getblk(inode, block+i, create, err);
			if (!tmp_bh) {
				brelse (bh);
				return 0;
			}
			brelse (tmp_bh);
		}
	}

	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block (READ, 1, &bh);
	wait_on_buffer (bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse (bh);
	*err = -EIO;
	return NULL;
}

static int ext2_writepage(struct file *file, struct page *page)
{
	return block_write_full_page(page,ext2_get_block);
}
static int ext2_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ext2_get_block);
}
static int ext2_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page,from,to,ext2_get_block);
}
static int ext2_bmap(struct address_space *mapping, long block)
{
	return generic_block_bmap(mapping,block,ext2_get_block);
}
struct address_space_operations ext2_aops = {
	readpage: ext2_readpage,
	writepage: ext2_writepage,
	sync_page: block_sync_page,
	prepare_write: ext2_prepare_write,
	commit_write: generic_commit_write,
	bmap: ext2_bmap
};
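
/*
 * With these operations in place the generic VFS paths do the real
 * work: a write(2) reaches generic_file_write(), which calls
 * ->prepare_write and ->commit_write for each page, and the block
 * mapping in every path bottoms out in ext2_get_block() above;
 * generic_block_bmap() resolves FIBMAP lookups through the same helper.
 */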

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(u32 *p, u32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode: inode in question
 *	@depth: depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
 *	@chain: place to store the pointers to partial indirect blocks
 *	@top: place to put the (detached) top of the branch
 *
 *	This is a helper function used by ext2_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several indirect
 *	blocks but leave the blocks themselves alive. A block is partially
 *	truncated if some data below the new i_size is referred to from it (and
 *	it is on the path to the first completely truncated data block, indeed).
 *	We have to free the top of that path along with everything to the right
 *	of the path. Since no allocation past the truncation point is possible
 *	until ext2_truncate() finishes, we may safely do the latter, but the
 *	top of the branch may require special attention - pageout below the
 *	truncation point might try to populate it.
 *
 *	We atomically detach the top of the branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks in @chain[].bh and pointers to their last
 *	elements that should not be removed in @chain[].p. The return value is
 *	the pointer to the last filled element of @chain.
 *
 *	The actual freeing of the subtrees is left to the caller:
 *	a) free the subtree starting from *@top
 *	b) free the subtrees whose roots are stored in
 *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	c) free the subtrees growing from the inode past the @chain[0].p
 *		(no partially truncated stuff there).
 */

static Indirect *ext2_find_shared(struct inode *inode,
				  int depth,
				  int offsets[4],
				  Indirect chain[4],
				  u32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/**
 *	ext2_free_data - free a list of data blocks
 *	@inode: inode we are dealing with
 *	@p: array of block numbers
 *	@q: points immediately past the end of array
 *
 *	We are freeing all blocks referred to from that array (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */

static inline void ext2_free_data(struct inode *inode, u32 *p, u32 *q)
{
	int blocks = inode->i_sb->s_blocksize / 512;
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				/* Writer: ->i_blocks */
				inode->i_blocks -= blocks * count;
				/* Writer: end */
				ext2_free_blocks (inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	if (count > 0) {
		/* Writer: ->i_blocks */
		inode->i_blocks -= blocks * count;
		/* Writer: end */
		ext2_free_blocks (inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
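
/*
 * Worked example (hypothetical numbers): freeing an array holding
 * blocks 100, 101, 102, 200 coalesces the first three into a single
 * ext2_free_blocks(inode, 100, 3) call - count keeps growing while
 * block_to_free == nr - count holds - then 200 breaks the run and is
 * flushed after the loop as ext2_free_blocks(inode, 200, 1).
 */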

/**
 *	ext2_free_branches - free an array of branches
 *	@inode: inode we are dealing with
 *	@p: array of block numbers
 *	@q: pointer immediately past the end of array
 *	@depth: depth of the branches to free
 *
 *	We are freeing all blocks referred to from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */

static void ext2_free_branches(struct inode *inode, u32 *p, u32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = bread (inode->i_dev, nr, inode->i_sb->s_blocksize);
			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (u32*)bh->b_data,
					   (u32*)bh->b_data + addr_per_block,
					   depth);
			bforget(bh);
			/* Writer: ->i_blocks */
			inode->i_blocks -= inode->i_sb->s_blocksize / 512;
			/* Writer: end */
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}

void ext2_truncate (struct inode * inode)
{
	u32 *i_data = inode->u.ext2_i.i_data;
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int nr = 0;
	int n;
	long iblock;
	unsigned blocksize, tail;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	ext2_discard_prealloc(inode);

	blocksize = inode->i_sb->s_blocksize;
	iblock = (inode->i_size + blocksize-1)
					>> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
	tail = (iblock << EXT2_BLOCK_SIZE_BITS(inode->i_sb)) - inode->i_size;
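
	/*
	 * Worked example, assuming a 1KiB block size: for i_size == 5000,
	 * iblock == (5000 + 1023) >> 10 == 5 is the first block to free,
	 * and tail == (5 << 10) - 5000 == 120 is the number of bytes that
	 * must be zeroed at the end of the last block that survives.
	 */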
	if (block_zero_page(inode->i_mapping, inode->i_size, tail) != 0)
		return;

	n = ext2_block_to_path(inode, iblock, offsets);
	if (n == 0)
		return;

	if (n == 1) {
		ext2_free_data(inode, i_data+offsets[0],
				i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty(partial->bh, 1);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (u32*)partial->bh->b_data + addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty(partial->bh, 1);
		if (IS_SYNC(inode)) {
			ll_rw_block (WRITE, 1, &partial->bh);
			wait_on_buffer (partial->bh);
		}
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
		default:
			nr = i_data[EXT2_IND_BLOCK];
			if (nr) {
				i_data[EXT2_IND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 1);
			}
		case EXT2_IND_BLOCK:
			nr = i_data[EXT2_DIND_BLOCK];
			if (nr) {
				i_data[EXT2_DIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 2);
			}
		case EXT2_DIND_BLOCK:
			nr = i_data[EXT2_TIND_BLOCK];
			if (nr) {
				i_data[EXT2_TIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 3);
			}
		case EXT2_TIND_BLOCK:
			;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		ext2_sync_inode (inode);
	else
		mark_inode_dirty(inode);
}

void ext2_read_inode (struct inode * inode)
{
	struct buffer_head * bh;
	struct ext2_inode * raw_inode;
	unsigned long block_group;
	unsigned long group_desc;
	unsigned long desc;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	if ((inode->i_ino != EXT2_ROOT_INO && inode->i_ino != EXT2_ACL_IDX_INO &&
	     inode->i_ino != EXT2_ACL_DATA_INO &&
	     inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
	    inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
		ext2_error (inode->i_sb, "ext2_read_inode",
			    "bad inode number: %lu", inode->i_ino);
		goto bad_inode;
	}
	block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	if (block_group >= inode->i_sb->u.ext2_sb.s_groups_count) {
		ext2_error (inode->i_sb, "ext2_read_inode",
			    "group >= groups count");
		goto bad_inode;
	}
	group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
	desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
	bh = inode->i_sb->u.ext2_sb.s_group_desc[group_desc];
	if (!bh) {
		ext2_error (inode->i_sb, "ext2_read_inode",
			    "Descriptor not loaded");
		goto bad_inode;
	}

	gdp = (struct ext2_group_desc *) bh->b_data;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
		EXT2_INODE_SIZE(inode->i_sb);
	block = le32_to_cpu(gdp[desc].bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
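
	/*
	 * Worked example, assuming 2048 inodes per group, 128-byte inodes
	 * and 1KiB blocks: for inode 5000, block_group == 4999 / 2048 == 2
	 * and offset == (4999 % 2048) * 128 == 115584, so we read block
	 * bg_inode_table + (115584 >> 10) == table block 112 and find the
	 * raw inode at byte offset 115584 & 1023 == 896 within it.
	 */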
	if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
		ext2_error (inode->i_sb, "ext2_read_inode",
			    "unable to read inode block - "
			    "inode=%lu, block=%lu", inode->i_ino, block);
		goto bad_inode;
	}
	offset &= (EXT2_BLOCK_SIZE(inode->i_sb) - 1);
	raw_inode = (struct ext2_inode *) (bh->b_data + offset);

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if(!(test_opt (inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime = le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
	inode->u.ext2_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || inode->u.ext2_i.i_dtime)) {
		/* this inode is deleted */
		brelse (bh);
		goto bad_inode;
	}
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	inode->i_version = ++event;
	inode->u.ext2_i.i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->u.ext2_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
	inode->u.ext2_i.i_frag_no = raw_inode->i_frag;
	inode->u.ext2_i.i_frag_size = raw_inode->i_fsize;
	inode->u.ext2_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	if (S_ISDIR(inode->i_mode))
		inode->u.ext2_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	else {
		inode->u.ext2_i.i_high_size = le32_to_cpu(raw_inode->i_size_high);
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	}
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	inode->u.ext2_i.i_block_group = block_group;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT2_N_BLOCKS; block++)
		inode->u.ext2_i.i_data[block] = raw_inode->i_block[block];

	if (inode->i_ino == EXT2_ACL_IDX_INO ||
	    inode->i_ino == EXT2_ACL_DATA_INO)
		/* Nothing to do */ ;
	else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext2_file_inode_operations;
		inode->i_fop = &ext2_file_operations;
		inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks)
			inode->i_op = &ext2_fast_symlink_inode_operations;
		else {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &ext2_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   le32_to_cpu(raw_inode->i_block[0]));
	brelse (bh);
	inode->i_attr_flags = 0;
	if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL) {
		inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS;
		inode->i_flags |= S_SYNC;
	}
	if (inode->u.ext2_i.i_flags & EXT2_APPEND_FL) {
		inode->i_attr_flags |= ATTR_FLAG_APPEND;
		inode->i_flags |= S_APPEND;
	}
	if (inode->u.ext2_i.i_flags & EXT2_IMMUTABLE_FL) {
		inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE;
		inode->i_flags |= S_IMMUTABLE;
	}
	if (inode->u.ext2_i.i_flags & EXT2_NOATIME_FL) {
		inode->i_attr_flags |= ATTR_FLAG_NOATIME;
		inode->i_flags |= S_NOATIME;
	}
	return;

bad_inode:
	make_bad_inode(inode);
	return;
}

static int ext2_update_inode(struct inode * inode, int do_sync)
{
	struct buffer_head * bh;
	struct ext2_inode * raw_inode;
	unsigned long block_group;
	unsigned long group_desc;
	unsigned long desc;
	unsigned long block;
	unsigned long offset;
	int err = 0;
	struct ext2_group_desc * gdp;

	if ((inode->i_ino != EXT2_ROOT_INO &&
	     inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
	    inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
		ext2_error (inode->i_sb, "ext2_write_inode",
			    "bad inode number: %lu", inode->i_ino);
		return -EIO;
	}
	block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	if (block_group >= inode->i_sb->u.ext2_sb.s_groups_count) {
		ext2_error (inode->i_sb, "ext2_write_inode",
			    "group >= groups count");
		return -EIO;
	}
	group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
	desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
	bh = inode->i_sb->u.ext2_sb.s_group_desc[group_desc];
	if (!bh) {
		ext2_error (inode->i_sb, "ext2_write_inode",
			    "Descriptor not loaded");
		return -EIO;
	}
	gdp = (struct ext2_group_desc *) bh->b_data;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
		EXT2_INODE_SIZE(inode->i_sb);
	block = le32_to_cpu(gdp[desc].bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
	if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
		ext2_error (inode->i_sb, "ext2_write_inode",
			    "unable to read inode block - "
			    "inode=%lu, block=%lu", inode->i_ino, block);
		return -EIO;
	}
	offset &= EXT2_BLOCK_SIZE(inode->i_sb) - 1;
	raw_inode = (struct ext2_inode *) (bh->b_data + offset);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if(!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact
		 */
		if(!inode->u.ext2_i.i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
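
	/*
	 * Example of the uid split, assuming 32-bit uids are enabled:
	 * uid 100000 is stored as i_uid_low == 34464 and i_uid_high == 1,
	 * since 100000 == 1 * 65536 + 34464.  Under the NO_UID32 mount
	 * option only the low 16 bits are written and fs_high2lowuid()
	 * substitutes the 16-bit overflow id for uids that do not fit.
	 */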
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(inode->u.ext2_i.i_dtime);
	raw_inode->i_flags = cpu_to_le32(inode->u.ext2_i.i_flags);
	raw_inode->i_faddr = cpu_to_le32(inode->u.ext2_i.i_faddr);
	raw_inode->i_frag = inode->u.ext2_i.i_frag_no;
	raw_inode->i_fsize = inode->u.ext2_i.i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(inode->u.ext2_i.i_file_acl);
	if (S_ISDIR(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext2_i.i_dir_acl);
	else {
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (raw_inode->i_size_high) {
			struct super_block *sb = inode->i_sb;
			struct ext2_super_block *es = sb->u.ext2_sb.s_es;
			if (!(es->s_feature_ro_compat & cpu_to_le32(EXT2_FEATURE_RO_COMPAT_LARGE_FILE))) {
				/* If this is the first large file
				 * created, add a flag to the superblock.
				 */
				lock_kernel();
				es->s_feature_ro_compat |= cpu_to_le32(EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				unlock_kernel();
				ext2_write_super(sb);
			}
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_block[0] = cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
	else for (block = 0; block < EXT2_N_BLOCKS; block++)
		raw_inode->i_block[block] = inode->u.ext2_i.i_data[block];
	mark_buffer_dirty(bh, 1);
	if (do_sync) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode ["
				"%s:%08lx]\n",
				bdevname(inode->i_dev), inode->i_ino);
			err = -EIO;
		}
	}
	brelse (bh);
	return err;
}

void ext2_write_inode (struct inode * inode, int wait)
{
	lock_kernel();
	ext2_update_inode (inode, wait);
	unlock_kernel();
}

int ext2_sync_inode (struct inode *inode)
{
	return ext2_update_inode (inode, 1);
}

int ext2_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int retval;
	unsigned int flags;

	retval = -EPERM;
	if (iattr->ia_valid & ATTR_ATTR_FLAG &&
	    ((!(iattr->ia_attr_flags & ATTR_FLAG_APPEND) !=
	      !(inode->u.ext2_i.i_flags & EXT2_APPEND_FL)) ||
	     (!(iattr->ia_attr_flags & ATTR_FLAG_IMMUTABLE) !=
	      !(inode->u.ext2_i.i_flags & EXT2_IMMUTABLE_FL)))) {
		if (!capable(CAP_LINUX_IMMUTABLE))
			goto out;
	} else if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
		goto out;

	retval = inode_change_ok(inode, iattr);
	if (retval != 0)
		goto out;

	inode_setattr(inode, iattr);

	flags = iattr->ia_attr_flags;
	if (flags & ATTR_FLAG_SYNCRONOUS) {
		inode->i_flags |= S_SYNC;
		inode->u.ext2_i.i_flags |= EXT2_SYNC_FL;
	} else {
		inode->i_flags &= ~S_SYNC;
		inode->u.ext2_i.i_flags &= ~EXT2_SYNC_FL;
	}
	if (flags & ATTR_FLAG_NOATIME) {
		inode->i_flags |= S_NOATIME;
		inode->u.ext2_i.i_flags |= EXT2_NOATIME_FL;
	} else {
		inode->i_flags &= ~S_NOATIME;
		inode->u.ext2_i.i_flags &= ~EXT2_NOATIME_FL;
	}
	if (flags & ATTR_FLAG_APPEND) {
		inode->i_flags |= S_APPEND;
		inode->u.ext2_i.i_flags |= EXT2_APPEND_FL;
	} else {
		inode->i_flags &= ~S_APPEND;
		inode->u.ext2_i.i_flags &= ~EXT2_APPEND_FL;
	}
	if (flags & ATTR_FLAG_IMMUTABLE) {
		inode->i_flags |= S_IMMUTABLE;
		inode->u.ext2_i.i_flags |= EXT2_IMMUTABLE_FL;
	} else {
		inode->i_flags &= ~S_IMMUTABLE;
		inode->u.ext2_i.i_flags &= ~EXT2_IMMUTABLE_FL;
	}
	mark_inode_dirty(inode);
out:
	return retval;
}