Import 2.3.7pre9
[davej-history.git] / fs / ext2 / inode.c
blob1258a39b63e6026c3eae3f99cd988f6c5ccbdf32
1 /*
2 * linux/fs/ext2/inode.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
9 * from
11 * linux/fs/minix/inode.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@dcs.ed.ac.uk), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
23 #include <asm/uaccess.h>
24 #include <asm/system.h>
26 #include <linux/errno.h>
27 #include <linux/fs.h>
28 #include <linux/ext2_fs.h>
29 #include <linux/sched.h>
30 #include <linux/stat.h>
31 #include <linux/string.h>
32 #include <linux/locks.h>
33 #include <linux/mm.h>
35 static int ext2_update_inode(struct inode * inode, int do_sync);
38 * Called at each iput()
/*
 * Called at each iput(): return any preallocated-but-unused blocks to
 * the free pool so they are not held while the inode is unreferenced.
 */
40 void ext2_put_inode (struct inode * inode)
42 	ext2_discard_prealloc (inode);
46 * Called at the last iput() if i_nlink is zero.
/*
 * Called at the last iput() when i_nlink is zero: stamp the deletion
 * time, write the inode out, truncate away all data blocks and free
 * the on-disk inode.  Order matters: i_dtime must reach disk before
 * the inode bitmap bit is cleared.
 */
48 void ext2_delete_inode (struct inode * inode)
/* The reserved ACL inodes are never actually deleted. */
50 	if (inode->i_ino == EXT2_ACL_IDX_INO ||
51 	    inode->i_ino == EXT2_ACL_DATA_INO)
52 		return;
53 	inode->u.ext2_i.i_dtime	= CURRENT_TIME;
54 	mark_inode_dirty(inode);
/* push the dtime to disk (synchronously if the inode is sync) */
55 	ext2_update_inode(inode, IS_SYNC(inode));
56 	inode->i_size = 0;
57 	if (inode->i_blocks)
58 		ext2_truncate (inode);
59 	ext2_free_inode (inode);
62 #define inode_bmap(inode, nr) (le32_to_cpu((inode)->u.ext2_i.i_data[(nr)]))
/*
 * Fetch 32-bit entry `nr` from an indirect block and drop the buffer.
 * Returns 0 when bh is NULL (hole or failed read).  Entries are stored
 * little-endian on disk, hence the le32_to_cpu().
 */
64 static inline int block_bmap (struct buffer_head * bh, int nr)
66 	int tmp;
68 	if (!bh)
69 		return 0;
70 	tmp = le32_to_cpu(((u32 *) bh->b_data)[nr]);
/* consumes the reference on bh passed in by the caller */
71 	brelse (bh);
72 	return tmp;
75 /*
76 * ext2_discard_prealloc and ext2_alloc_block are atomic wrt. the
77 * superblock in the same manner as are ext2_free_blocks and
78 * ext2_new_block. We just wait on the super rather than locking it
79 * here, since ext2_new_block will do the necessary locking and we
80 * can't block until then.
/*
 * Give back any blocks reserved by preallocation.  The count is
 * zeroed BEFORE freeing so a concurrent allocator cannot hand the
 * same preallocated blocks out twice.
 */
82 void ext2_discard_prealloc (struct inode * inode)
84 #ifdef EXT2_PREALLOCATE
85 	unsigned short total;
87 	if (inode->u.ext2_i.i_prealloc_count) {
88 		total = inode->u.ext2_i.i_prealloc_count;
89 		inode->u.ext2_i.i_prealloc_count = 0;
90 		ext2_free_blocks (inode, inode->u.ext2_i.i_prealloc_block, total);
92 #endif
/*
 * Allocate one block for `inode`, preferring block number `goal`.
 * Serves the allocation from the inode's preallocation window when the
 * goal falls inside it; otherwise discards the window and asks
 * ext2_new_block() for a fresh block (refilling the window for regular
 * files).  Returns the block number, or 0 with *err set on failure.
 */
95 static int ext2_alloc_block (struct inode * inode, unsigned long goal, int *err)
97 #ifdef EXT2FS_DEBUG
98 	static unsigned long alloc_hits = 0, alloc_attempts = 0;
99 #endif
100 	unsigned long result;
/* see the comment above ext2_discard_prealloc: wait rather than lock */
102 	wait_on_super (inode->i_sb);
104 #ifdef EXT2_PREALLOCATE
/* goal or goal+1 matching the window head counts as a prealloc hit */
105 	if (inode->u.ext2_i.i_prealloc_count &&
106 	    (goal == inode->u.ext2_i.i_prealloc_block ||
107 	     goal + 1 == inode->u.ext2_i.i_prealloc_block))
109 		result = inode->u.ext2_i.i_prealloc_block++;
110 		inode->u.ext2_i.i_prealloc_count--;
111 		ext2_debug ("preallocation hit (%lu/%lu).\n",
112 			    ++alloc_hits, ++alloc_attempts);
114 	} else {
/* window is useless for this goal -- drop it and allocate anew */
115 		ext2_discard_prealloc (inode);
116 		ext2_debug ("preallocation miss (%lu/%lu).\n",
117 			    alloc_hits, ++alloc_attempts);
/* only regular files get a new preallocation window */
118 		if (S_ISREG(inode->i_mode))
119 			result = ext2_new_block (inode, goal,
120 				 &inode->u.ext2_i.i_prealloc_count,
121 				 &inode->u.ext2_i.i_prealloc_block, err);
122 		else
123 			result = ext2_new_block (inode, goal, 0, 0, err);
125 #else
126 	result = ext2_new_block (inode, goal, 0, 0, err);
127 #endif
128 	return result;
/*
 * Map logical file block -> physical block number by walking the
 * direct, single-, double- and triple-indirect trees.  Returns 0 for
 * holes and for out-of-range block numbers (after logging a warning).
 * Never allocates.
 */
132 int ext2_bmap (struct inode * inode, int block)
134 	int i;
135 	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
136 	int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
138 	if (block < 0) {
139 		ext2_warning (inode->i_sb, "ext2_bmap", "block < 0");
140 		return 0;
/* reject blocks beyond direct + 1x + 2x + 3x indirect capacity */
142 	if (block >= EXT2_NDIR_BLOCKS + addr_per_block +
143 		(1 << (addr_per_block_bits * 2)) +
144 		((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
145 		ext2_warning (inode->i_sb, "ext2_bmap", "block > big");
146 		return 0;
/* direct blocks live right in i_data[] */
148 	if (block < EXT2_NDIR_BLOCKS)
149 		return inode_bmap (inode, block);
150 	block -= EXT2_NDIR_BLOCKS;
/* single indirect */
151 	if (block < addr_per_block) {
152 		i = inode_bmap (inode, EXT2_IND_BLOCK);
153 		if (!i)
154 			return 0;
155 		return block_bmap (bread (inode->i_dev, i,
156 					  inode->i_sb->s_blocksize), block);
158 	block -= addr_per_block;
/* double indirect */
159 	if (block < (1 << (addr_per_block_bits * 2))) {
160 		i = inode_bmap (inode, EXT2_DIND_BLOCK);
161 		if (!i)
162 			return 0;
163 		i = block_bmap (bread (inode->i_dev, i,
164 				       inode->i_sb->s_blocksize),
165 				block >> addr_per_block_bits);
166 		if (!i)
167 			return 0;
168 		return block_bmap (bread (inode->i_dev, i,
169 					  inode->i_sb->s_blocksize),
170 				   block & (addr_per_block - 1));
172 	block -= (1 << (addr_per_block_bits * 2));
/* triple indirect */
173 	i = inode_bmap (inode, EXT2_TIND_BLOCK);
174 	if (!i)
175 		return 0;
176 	i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
177 			block >> (addr_per_block_bits * 2));
178 	if (!i)
179 		return 0;
180 	i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
181 			(block >> addr_per_block_bits) & (addr_per_block - 1));
182 	if (!i)
183 		return 0;
184 	return block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
185 			   block & (addr_per_block - 1));
188 int ext2_bmap_create (struct inode * inode, int block)
190 int i;
191 int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
192 int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
194 if (block < 0) {
195 ext2_warning (inode->i_sb, "ext2_bmap", "block < 0");
196 return 0;
198 if (block >= EXT2_NDIR_BLOCKS + addr_per_block +
199 (1 << (addr_per_block_bits * 2)) +
200 ((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
201 ext2_warning (inode->i_sb, "ext2_bmap", "block > big");
202 return 0;
204 if (block < EXT2_NDIR_BLOCKS)
205 return inode_bmap (inode, block);
206 block -= EXT2_NDIR_BLOCKS;
207 if (block < addr_per_block) {
208 i = inode_bmap (inode, EXT2_IND_BLOCK);
209 if (!i)
210 return 0;
211 return block_bmap (bread (inode->i_dev, i,
212 inode->i_sb->s_blocksize), block);
214 block -= addr_per_block;
215 if (block < (1 << (addr_per_block_bits * 2))) {
216 i = inode_bmap (inode, EXT2_DIND_BLOCK);
217 if (!i)
218 return 0;
219 i = block_bmap (bread (inode->i_dev, i,
220 inode->i_sb->s_blocksize),
221 block >> addr_per_block_bits);
222 if (!i)
223 return 0;
224 return block_bmap (bread (inode->i_dev, i,
225 inode->i_sb->s_blocksize),
226 block & (addr_per_block - 1));
228 block -= (1 << (addr_per_block_bits * 2));
229 i = inode_bmap (inode, EXT2_TIND_BLOCK);
230 if (!i)
231 return 0;
232 i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
233 block >> (addr_per_block_bits * 2));
234 if (!i)
235 return 0;
236 i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
237 (block >> addr_per_block_bits) & (addr_per_block - 1));
238 if (!i)
239 return 0;
240 return block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
241 block & (addr_per_block - 1));
/*
 * Look up (and with create != 0, allocate) slot `nr` of the inode's
 * direct/indirect array i_data.
 *
 * metadata != 0: returns a buffer_head for the block; a newly
 * allocated block is zero-filled and marked uptodate + dirty.
 * metadata == 0: returns NULL and reports the physical block number
 * through *phys_block; *created is set to 1 when a block was newly
 * allocated.  The repeat-label dance re-checks *p after every
 * potentially-blocking call, since another process may have installed
 * a block in this slot meanwhile.
 */
244 static struct buffer_head * inode_getblk (struct inode * inode, int nr,
245 	int create, int new_block, int * err, int metadata,
246 	int *phys_block, int *created)
248 	u32 * p;
249 	int tmp, goal = 0;
250 	struct buffer_head * result;
251 	int blocks = inode->i_sb->s_blocksize / 512;
253 	p = inode->u.ext2_i.i_data + nr;
254 repeat:
255 	tmp = le32_to_cpu(*p);
256 	if (tmp) {
257 		if (metadata) {
258 			struct buffer_head * result = getblk (inode->i_dev, tmp, inode->i_sb->s_blocksize);
/* getblk may have slept: verify the slot still holds the same block */
259 			if (tmp == le32_to_cpu(*p))
260 				return result;
261 			brelse (result);
262 			goto repeat;
263 		} else {
264 			*phys_block = tmp;
265 			return NULL;
268 	*err = -EFBIG;
269 	if (!create)
270 		goto dont_create;
272 	/* Check file limits.. */
274 		unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
275 		if (limit < RLIM_INFINITY) {
276 			limit >>= EXT2_BLOCK_SIZE_BITS(inode->i_sb);
277 			if (new_block >= limit) {
278 				send_sig(SIGXFSZ, current, 0);
279 			dont_create:
280 				*err = -EFBIG;
281 				return NULL;
/* sequential write: reuse the goal computed for the previous block */
286 	if (inode->u.ext2_i.i_next_alloc_block == new_block)
287 		goal = inode->u.ext2_i.i_next_alloc_goal;
289 	ext2_debug ("hint = %d,", goal);
/* no hint: aim just after the nearest earlier allocated block,
 * falling back to the start of the inode's block group */
291 	if (!goal) {
292 		for (tmp = nr - 1; tmp >= 0; tmp--) {
293 			if (inode->u.ext2_i.i_data[tmp]) {
294 				goal = le32_to_cpu(inode->u.ext2_i.i_data[tmp]);
295 				break;
298 		if (!goal)
299 			goal = (inode->u.ext2_i.i_block_group *
300 				EXT2_BLOCKS_PER_GROUP(inode->i_sb)) +
301 			       le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_first_data_block);
304 	ext2_debug ("goal = %d.\n", goal);
306 	tmp = ext2_alloc_block (inode, goal, err);
307 	if (!tmp)
308 		return NULL;
309 	if (metadata) {
310 		result = getblk (inode->i_dev, tmp, inode->i_sb->s_blocksize);
/* lost the race: someone filled the slot while we allocated */
311 		if (*p) {
312 			ext2_free_blocks (inode, tmp, 1);
313 			brelse (result);
314 			goto repeat;
316 		memset(result->b_data, 0, inode->i_sb->s_blocksize);
317 		mark_buffer_uptodate(result, 1);
318 		mark_buffer_dirty(result, 1);
319 	} else {
320 		if (*p) {
321 			ext2_free_blocks (inode, tmp, 1);
322 			goto repeat;
324 		*phys_block = tmp;
325 		result = NULL;
326 		*err = 0;
327 		*created = 1;
/* publish the new block pointer (little-endian on disk) */
329 	*p = cpu_to_le32(tmp);
331 	inode->u.ext2_i.i_next_alloc_block = new_block;
332 	inode->u.ext2_i.i_next_alloc_goal = tmp;
333 	inode->i_ctime = CURRENT_TIME;
334 	inode->i_blocks += blocks;
335 	if (IS_SYNC(inode) || inode->u.ext2_i.i_osync)
336 		ext2_sync_inode (inode);
337 	else
338 		mark_inode_dirty(inode);
339 	return result;
343 * metadata / data
344 * possibly create / access
345 * can fail due to: - not present
346 * - out of space
348 * NULL return in the data case is mandatory.
350 static struct buffer_head * block_getblk (struct inode * inode,
351 struct buffer_head * bh, int nr, int create, int blocksize,
352 int new_block, int * err, int metadata, int *phys_block, int *created)
354 int tmp, goal = 0;
355 u32 * p;
356 struct buffer_head * result;
357 int blocks = inode->i_sb->s_blocksize / 512;
358 unsigned long limit;
360 if (!bh)
361 return NULL;
362 if (!buffer_uptodate(bh)) {
363 ll_rw_block (READ, 1, &bh);
364 wait_on_buffer (bh);
365 if (!buffer_uptodate(bh)) {
366 brelse (bh);
367 return NULL;
370 p = (u32 *) bh->b_data + nr;
371 repeat:
372 tmp = le32_to_cpu(*p);
373 if (tmp) {
374 if (metadata) {
375 result = getblk (bh->b_dev, tmp, blocksize);
376 if (tmp == le32_to_cpu(*p)) {
377 brelse (bh);
378 return result;
380 brelse (result);
381 goto repeat;
382 } else {
383 *phys_block = tmp;
384 brelse (bh);
385 return NULL;
388 *err = -EFBIG;
389 if (!create) {
390 brelse (bh);
391 return NULL;
394 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
395 if (limit < RLIM_INFINITY) {
396 limit >>= EXT2_BLOCK_SIZE_BITS(inode->i_sb);
397 if (new_block >= limit) {
398 brelse (bh);
399 send_sig(SIGXFSZ, current, 0);
400 return NULL;
404 if (inode->u.ext2_i.i_next_alloc_block == new_block)
405 goal = inode->u.ext2_i.i_next_alloc_goal;
406 if (!goal) {
407 for (tmp = nr - 1; tmp >= 0; tmp--) {
408 if (le32_to_cpu(((u32 *) bh->b_data)[tmp])) {
409 goal = le32_to_cpu(((u32 *)bh->b_data)[tmp]);
410 break;
413 if (!goal)
414 goal = bh->b_blocknr;
416 tmp = ext2_alloc_block (inode, goal, err);
417 if (!tmp) {
418 brelse (bh);
419 return NULL;
421 if (metadata) {
422 result = getblk (bh->b_dev, tmp, blocksize);
423 if (*p) {
424 ext2_free_blocks (inode, tmp, 1);
425 brelse (result);
426 goto repeat;
428 memset(result->b_data, 0, inode->i_sb->s_blocksize);
429 mark_buffer_uptodate(result, 1);
430 mark_buffer_dirty(result, 1);
431 } else {
432 *phys_block = tmp;
433 result = NULL;
434 *err = 0;
435 *created = 1;
437 if (le32_to_cpu(*p)) {
438 ext2_free_blocks (inode, tmp, 1);
439 brelse (result);
440 goto repeat;
442 *p = le32_to_cpu(tmp);
443 mark_buffer_dirty(bh, 1);
444 if (IS_SYNC(inode) || inode->u.ext2_i.i_osync) {
445 ll_rw_block (WRITE, 1, &bh);
446 wait_on_buffer (bh);
448 inode->i_ctime = CURRENT_TIME;
449 inode->i_blocks += blocks;
450 mark_inode_dirty(inode);
451 inode->u.ext2_i.i_next_alloc_block = new_block;
452 inode->u.ext2_i.i_next_alloc_goal = tmp;
453 brelse (bh);
454 return result;
457 int ext2_getblk_block (struct inode * inode, long block,
458 int create, int * err, int * created)
460 struct buffer_head * bh, *tmp;
461 unsigned long b;
462 unsigned long addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
463 int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
464 int phys_block;
466 *err = -EIO;
467 if (block < 0) {
468 ext2_warning (inode->i_sb, "ext2_getblk", "block < 0");
469 return 0;
471 if (block > EXT2_NDIR_BLOCKS + addr_per_block +
472 (1 << (addr_per_block_bits * 2)) +
473 ((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
474 ext2_warning (inode->i_sb, "ext2_getblk", "block > big");
475 return 0;
478 * If this is a sequential block allocation, set the next_alloc_block
479 * to this block now so that all the indblock and data block
480 * allocations use the same goal zone
483 ext2_debug ("block %lu, next %lu, goal %lu.\n", block,
484 inode->u.ext2_i.i_next_alloc_block,
485 inode->u.ext2_i.i_next_alloc_goal);
487 if (block == inode->u.ext2_i.i_next_alloc_block + 1) {
488 inode->u.ext2_i.i_next_alloc_block++;
489 inode->u.ext2_i.i_next_alloc_goal++;
492 *err = 0; // -ENOSPC;
493 b = block;
494 *created = 0;
495 if (block < EXT2_NDIR_BLOCKS) {
497 * data page.
499 tmp = inode_getblk (inode, block, create, b,
500 err, 0, &phys_block, created);
501 goto out;
503 block -= EXT2_NDIR_BLOCKS;
504 if (block < addr_per_block) {
505 bh = inode_getblk (inode, EXT2_IND_BLOCK, create, b, err, 1, NULL, NULL);
506 tmp = block_getblk (inode, bh, block, create,
507 inode->i_sb->s_blocksize, b, err, 0, &phys_block, created);
508 goto out;
510 block -= addr_per_block;
511 if (block < (1 << (addr_per_block_bits * 2))) {
512 bh = inode_getblk (inode, EXT2_DIND_BLOCK, create, b, err, 1, NULL, NULL);
513 bh = block_getblk (inode, bh, block >> addr_per_block_bits,
514 create, inode->i_sb->s_blocksize, b, err, 1, NULL, NULL);
515 tmp = block_getblk (inode, bh, block & (addr_per_block - 1),
516 create, inode->i_sb->s_blocksize, b, err, 0, &phys_block, created);
517 goto out;
519 block -= (1 << (addr_per_block_bits * 2));
520 bh = inode_getblk (inode, EXT2_TIND_BLOCK, create, b, err, 1, NULL,NULL);
521 bh = block_getblk (inode, bh, block >> (addr_per_block_bits * 2),
522 create, inode->i_sb->s_blocksize, b, err, 1, NULL,NULL);
523 bh = block_getblk (inode, bh, (block >> addr_per_block_bits) &
524 (addr_per_block - 1), create, inode->i_sb->s_blocksize,
525 b, err, 1, NULL,NULL);
526 tmp = block_getblk (inode, bh, block & (addr_per_block - 1), create,
527 inode->i_sb->s_blocksize, b, err, 0, &phys_block, created);
529 out:
530 if (!phys_block) {
531 return 0;
533 if (*err) {
534 return 0;
536 return phys_block;
539 struct buffer_head * ext2_getblk (struct inode * inode, long block,
540 int create, int * err)
542 struct buffer_head *tmp = NULL;
543 int phys_block;
544 int created;
546 phys_block = ext2_getblk_block (inode, block, create, err, &created);
548 if (phys_block) {
549 tmp = getblk (inode->i_dev, phys_block, inode->i_sb->s_blocksize);
550 if (created) {
551 memset(tmp->b_data, 0, inode->i_sb->s_blocksize);
552 mark_buffer_uptodate(tmp, 1);
553 mark_buffer_dirty(tmp, 1);
556 return tmp;
/*
 * bread()-style interface: obtain the block (allocating it when
 * `create` is set) and guarantee its contents are uptodate, reading
 * from disk when necessary.  When a directory grows, a few extra
 * blocks are preallocated (DIR_PREALLOC compat feature) to keep
 * directory fragmentation down.  Returns NULL with *err set on I/O
 * failure.
 */
559 struct buffer_head * ext2_bread (struct inode * inode, int block,
560 	int create, int *err)
562 	struct buffer_head * bh;
563 	int prev_blocks;
/* remember i_blocks so we can tell whether the inode grew */
565 	prev_blocks = inode->i_blocks;
567 	bh = ext2_getblk (inode, block, create, err);
568 	if (!bh)
569 		return bh;
572 	 * If the inode has grown, and this is a directory, then perform
573 	 * preallocation of a few more blocks to try to keep directory
574 	 * fragmentation down.
576 	if (create &&
577 	    S_ISDIR(inode->i_mode) &&
578 	    inode->i_blocks > prev_blocks &&
579 	    EXT2_HAS_COMPAT_FEATURE(inode->i_sb,
580 				    EXT2_FEATURE_COMPAT_DIR_PREALLOC)) {
581 		int i;
582 		struct buffer_head *tmp_bh;
584 		for (i = 1;
585 		     i < EXT2_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
586 		     i++) {
588 			 * ext2_getblk will zero out the contents of the
589 			 * directory for us
591 			tmp_bh = ext2_getblk(inode, block+i, create, err);
592 			if (!tmp_bh) {
593 				brelse (bh);
594 				return 0;
/* we only needed the allocation side effect, not the buffer */
596 			brelse (tmp_bh);
600 	if (buffer_uptodate(bh))
601 		return bh;
/* not cached: read it in and wait */
602 	ll_rw_block (READ, 1, &bh);
603 	wait_on_buffer (bh);
604 	if (buffer_uptodate(bh))
605 		return bh;
606 	brelse (bh);
607 	*err = -EIO;
608 	return NULL;
/*
 * Fill the in-core inode from its on-disk struct ext2_inode: validate
 * the inode number, locate the group descriptor, read the inode-table
 * block holding this inode, copy all fields (byte-swapping from
 * little-endian), then wire up the inode_operations by file type and
 * mirror the EXT2_*_FL flags into the VFS flag words.  On any failure
 * the inode is marked bad.
 */
611 void ext2_read_inode (struct inode * inode)
613 	struct buffer_head * bh;
614 	struct ext2_inode * raw_inode;
615 	unsigned long block_group;
616 	unsigned long group_desc;
617 	unsigned long desc;
618 	unsigned long block;
619 	unsigned long offset;
620 	struct ext2_group_desc * gdp;
/* only the root and ACL inodes may lie below EXT2_FIRST_INO */
622 	if ((inode->i_ino != EXT2_ROOT_INO && inode->i_ino != EXT2_ACL_IDX_INO &&
623 	     inode->i_ino != EXT2_ACL_DATA_INO &&
624 	     inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
625 	    inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
626 		ext2_error (inode->i_sb, "ext2_read_inode",
627 			    "bad inode number: %lu", inode->i_ino);
628 		goto bad_inode;
630 	block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
631 	if (block_group >= inode->i_sb->u.ext2_sb.s_groups_count) {
632 		ext2_error (inode->i_sb, "ext2_read_inode",
633 			    "group >= groups count");
634 		goto bad_inode;
636 	group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
637 	desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
638 	bh = inode->i_sb->u.ext2_sb.s_group_desc[group_desc];
639 	if (!bh) {
640 		ext2_error (inode->i_sb, "ext2_read_inode",
641 			    "Descriptor not loaded");
642 		goto bad_inode;
645 	gdp = (struct ext2_group_desc *) bh->b_data;
647 	 * Figure out the offset within the block group inode table
649 	offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
650 		EXT2_INODE_SIZE(inode->i_sb);
651 	block = le32_to_cpu(gdp[desc].bg_inode_table) +
652 		(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
653 	if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
654 		ext2_error (inode->i_sb, "ext2_read_inode",
655 			    "unable to read inode block - "
656 			    "inode=%lu, block=%lu", inode->i_ino, block);
657 		goto bad_inode;
659 	offset &= (EXT2_BLOCK_SIZE(inode->i_sb) - 1);
660 	raw_inode = (struct ext2_inode *) (bh->b_data + offset);
/* copy the on-disk fields, converting from little-endian */
662 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
663 	inode->i_uid = le16_to_cpu(raw_inode->i_uid);
664 	inode->i_gid = le16_to_cpu(raw_inode->i_gid);
665 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
666 	inode->i_size = le32_to_cpu(raw_inode->i_size);
667 	inode->i_atime = le32_to_cpu(raw_inode->i_atime);
668 	inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
669 	inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
670 	inode->u.ext2_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
671 	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
672 	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
673 	inode->i_version = ++event;
674 	inode->u.ext2_i.i_new_inode = 0;
675 	inode->u.ext2_i.i_flags = le32_to_cpu(raw_inode->i_flags);
676 	inode->u.ext2_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
677 	inode->u.ext2_i.i_frag_no = raw_inode->i_frag;
678 	inode->u.ext2_i.i_frag_size = raw_inode->i_fsize;
679 	inode->u.ext2_i.i_osync = 0;
680 	inode->u.ext2_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
/* i_dir_acl and i_size_high share on-disk storage: dir_acl for
 * directories, the high 32 bits of the size for everything else */
681 	if (S_ISDIR(inode->i_mode))
682 		inode->u.ext2_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
683 	else {
684 		inode->u.ext2_i.i_dir_acl = 0;
685 		inode->u.ext2_i.i_high_size =
686 			le32_to_cpu(raw_inode->i_size_high);
687 #if BITS_PER_LONG < 64
/* 32-bit hosts cannot represent >4GB sizes: clamp to (__u32)-1 */
688 		if (raw_inode->i_size_high)
689 			inode->i_size = (__u32)-1;
690 #else
691 		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high))
692 			<< 32;
693 #endif
695 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
696 	inode->u.ext2_i.i_block_group = block_group;
697 	inode->u.ext2_i.i_next_alloc_block = 0;
698 	inode->u.ext2_i.i_next_alloc_goal = 0;
699 	if (inode->u.ext2_i.i_prealloc_count)
700 		ext2_error (inode->i_sb, "ext2_read_inode",
701 			    "New inode has non-zero prealloc count!");
704 	 * NOTE! The in-memory inode i_blocks array is in little-endian order
705 	 * even on big-endian machines: we do NOT byteswap the block numbers!
707 	for (block = 0; block < EXT2_N_BLOCKS; block++)
708 		inode->u.ext2_i.i_data[block] = raw_inode->i_block[block];
/* select the inode operations by file type */
710 	if (inode->i_ino == EXT2_ACL_IDX_INO ||
711 	    inode->i_ino == EXT2_ACL_DATA_INO)
712 		/* Nothing to do */ ;
713 	else if (S_ISREG(inode->i_mode))
714 		inode->i_op = &ext2_file_inode_operations;
715 	else if (S_ISDIR(inode->i_mode))
716 		inode->i_op = &ext2_dir_inode_operations;
717 	else if (S_ISLNK(inode->i_mode))
718 		inode->i_op = &ext2_symlink_inode_operations;
719 	else
720 		init_special_inode(inode, inode->i_mode,
721 				   le32_to_cpu(raw_inode->i_block[0]));
722 	brelse (bh);
/* mirror the ext2 on-disk flags into the VFS flag words */
723 	inode->i_attr_flags = 0;
724 	if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL) {
725 		inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS;
726 		inode->i_flags |= MS_SYNCHRONOUS;
728 	if (inode->u.ext2_i.i_flags & EXT2_APPEND_FL) {
729 		inode->i_attr_flags |= ATTR_FLAG_APPEND;
730 		inode->i_flags |= S_APPEND;
732 	if (inode->u.ext2_i.i_flags & EXT2_IMMUTABLE_FL) {
733 		inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE;
734 		inode->i_flags |= S_IMMUTABLE;
736 	if (inode->u.ext2_i.i_flags & EXT2_NOATIME_FL) {
737 		inode->i_attr_flags |= ATTR_FLAG_NOATIME;
738 		inode->i_flags |= MS_NOATIME;
740 	return;
742 bad_inode:
743 	make_bad_inode(inode);
744 	return;
/*
 * Write the in-core inode back to its on-disk struct ext2_inode --
 * the reverse of ext2_read_inode().  Locates the group descriptor,
 * reads the inode-table block, converts fields to little-endian and
 * marks the buffer dirty; with do_sync != 0 the buffer is written out
 * and waited on.  Returns 0 on success or -EIO.
 */
747 static int ext2_update_inode(struct inode * inode, int do_sync)
749 	struct buffer_head * bh;
750 	struct ext2_inode * raw_inode;
751 	unsigned long block_group;
752 	unsigned long group_desc;
753 	unsigned long desc;
754 	unsigned long block;
755 	unsigned long offset;
756 	int err = 0;
757 	struct ext2_group_desc * gdp;
759 	if ((inode->i_ino != EXT2_ROOT_INO &&
760 	     inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
761 	    inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
762 		ext2_error (inode->i_sb, "ext2_write_inode",
763 			    "bad inode number: %lu", inode->i_ino);
764 		return -EIO;
766 	block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
767 	if (block_group >= inode->i_sb->u.ext2_sb.s_groups_count) {
768 		ext2_error (inode->i_sb, "ext2_write_inode",
769 			    "group >= groups count");
770 		return -EIO;
772 	group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
773 	desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
774 	bh = inode->i_sb->u.ext2_sb.s_group_desc[group_desc];
775 	if (!bh) {
776 		ext2_error (inode->i_sb, "ext2_write_inode",
777 			    "Descriptor not loaded");
778 		return -EIO;
780 	gdp = (struct ext2_group_desc *) bh->b_data;
782 	 * Figure out the offset within the block group inode table
784 	offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
785 		EXT2_INODE_SIZE(inode->i_sb);
786 	block = le32_to_cpu(gdp[desc].bg_inode_table) +
787 		(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
788 	if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
789 		ext2_error (inode->i_sb, "ext2_write_inode",
790 			    "unable to read inode block - "
791 			    "inode=%lu, block=%lu", inode->i_ino, block);
792 		return -EIO;
794 	offset &= EXT2_BLOCK_SIZE(inode->i_sb) - 1;
795 	raw_inode = (struct ext2_inode *) (bh->b_data + offset);
/* copy fields out, converting to little-endian on-disk order */
797 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
798 	raw_inode->i_uid = cpu_to_le16(inode->i_uid);
799 	raw_inode->i_gid = cpu_to_le16(inode->i_gid);
800 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
801 	raw_inode->i_size = cpu_to_le32(inode->i_size);
802 	raw_inode->i_atime = cpu_to_le32(inode->i_atime);
803 	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
804 	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
805 	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
806 	raw_inode->i_dtime = cpu_to_le32(inode->u.ext2_i.i_dtime);
807 	raw_inode->i_flags = cpu_to_le32(inode->u.ext2_i.i_flags);
808 	raw_inode->i_faddr = cpu_to_le32(inode->u.ext2_i.i_faddr);
809 	raw_inode->i_frag = inode->u.ext2_i.i_frag_no;
810 	raw_inode->i_fsize = inode->u.ext2_i.i_frag_size;
811 	raw_inode->i_file_acl = cpu_to_le32(inode->u.ext2_i.i_file_acl);
/* i_dir_acl shares storage with i_size_high (see ext2_read_inode) */
812 	if (S_ISDIR(inode->i_mode))
813 		raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext2_i.i_dir_acl);
814 	else {
815 #if BITS_PER_LONG < 64
816 		raw_inode->i_size_high =
817 			cpu_to_le32(inode->u.ext2_i.i_high_size);
818 #else
819 		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
820 #endif
822 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
/* device nodes keep the device number in i_block[0]; the i_data
 * array is already in little-endian order and is copied verbatim */
823 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
824 		raw_inode->i_block[0] = cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
825 	else for (block = 0; block < EXT2_N_BLOCKS; block++)
826 		raw_inode->i_block[block] = inode->u.ext2_i.i_data[block];
827 	mark_buffer_dirty(bh, 1);
828 	if (do_sync) {
829 		ll_rw_block (WRITE, 1, &bh);
830 		wait_on_buffer (bh);
831 		if (buffer_req(bh) && !buffer_uptodate(bh)) {
832 			printk ("IO error syncing ext2 inode ["
833 				"%s:%08lx]\n",
834 				bdevname(inode->i_dev), inode->i_ino);
835 			err = -EIO;
838 	brelse (bh);
839 	return err;
/* VFS write_inode hook: asynchronous write-back of the inode. */
842 void ext2_write_inode (struct inode * inode)
844 	ext2_update_inode (inode, 0);
/* Synchronous write-back: wait for the inode block to hit disk. */
847 int ext2_sync_inode (struct inode *inode)
849 	return ext2_update_inode (inode, 1);
852 int ext2_notify_change(struct dentry *dentry, struct iattr *iattr)
854 struct inode *inode = dentry->d_inode;
855 int retval;
856 unsigned int flags;
858 retval = -EPERM;
859 if ((iattr->ia_attr_flags &
860 (ATTR_FLAG_APPEND | ATTR_FLAG_IMMUTABLE)) ^
861 (inode->u.ext2_i.i_flags &
862 (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL))) {
863 if (!capable(CAP_LINUX_IMMUTABLE))
864 goto out;
865 } else if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
866 goto out;
868 retval = inode_change_ok(inode, iattr);
869 if (retval != 0)
870 goto out;
872 inode_setattr(inode, iattr);
874 flags = iattr->ia_attr_flags;
875 if (flags & ATTR_FLAG_SYNCRONOUS) {
876 inode->i_flags |= MS_SYNCHRONOUS;
877 inode->u.ext2_i.i_flags = EXT2_SYNC_FL;
878 } else {
879 inode->i_flags &= ~MS_SYNCHRONOUS;
880 inode->u.ext2_i.i_flags &= ~EXT2_SYNC_FL;
882 if (flags & ATTR_FLAG_NOATIME) {
883 inode->i_flags |= MS_NOATIME;
884 inode->u.ext2_i.i_flags = EXT2_NOATIME_FL;
885 } else {
886 inode->i_flags &= ~MS_NOATIME;
887 inode->u.ext2_i.i_flags &= ~EXT2_NOATIME_FL;
889 if (flags & ATTR_FLAG_APPEND) {
890 inode->i_flags |= S_APPEND;
891 inode->u.ext2_i.i_flags = EXT2_APPEND_FL;
892 } else {
893 inode->i_flags &= ~S_APPEND;
894 inode->u.ext2_i.i_flags &= ~EXT2_APPEND_FL;
896 if (flags & ATTR_FLAG_IMMUTABLE) {
897 inode->i_flags |= S_IMMUTABLE;
898 inode->u.ext2_i.i_flags = EXT2_IMMUTABLE_FL;
899 } else {
900 inode->i_flags &= ~S_IMMUTABLE;
901 inode->u.ext2_i.i_flags &= ~EXT2_IMMUTABLE_FL;
903 mark_inode_dirty(inode);
904 out:
905 return retval;