Linux-2.3.7.. Let's be careful out there..
[davej-history.git] / fs / ext2 / inode.c
blob02fb5b7b7eb51340707fade9d1e91ff90fb2facf
/*
 *  linux/fs/ext2/inode.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@dcs.ed.ac.uk), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */
23 #include <asm/uaccess.h>
24 #include <asm/system.h>
26 #include <linux/errno.h>
27 #include <linux/fs.h>
28 #include <linux/ext2_fs.h>
29 #include <linux/sched.h>
30 #include <linux/stat.h>
31 #include <linux/string.h>
32 #include <linux/locks.h>
33 #include <linux/mm.h>
34 #include <linux/smp_lock.h>
36 static int ext2_update_inode(struct inode * inode, int do_sync);
/*
 * Called at each iput(): drop any blocks still sitting in this
 * inode's preallocation window so they return to the free pool.
 */
void ext2_put_inode (struct inode * inode)
{
	ext2_discard_prealloc (inode);
}
47 * Called at the last iput() if i_nlink is zero.
49 void ext2_delete_inode (struct inode * inode)
51 if (inode->i_ino == EXT2_ACL_IDX_INO ||
52 inode->i_ino == EXT2_ACL_DATA_INO)
53 return;
54 inode->u.ext2_i.i_dtime = CURRENT_TIME;
55 mark_inode_dirty(inode);
56 ext2_update_inode(inode, IS_SYNC(inode));
57 inode->i_size = 0;
58 if (inode->i_blocks)
59 ext2_truncate (inode);
60 ext2_free_inode (inode);
/* Read slot <nr> of the inode's direct block map.  i_data is kept in
 * on-disk little-endian form even in core (see ext2_read_inode), hence
 * the le32_to_cpu on every access. */
#define inode_bmap(inode, nr) (le32_to_cpu((inode)->u.ext2_i.i_data[(nr)]))
65 static inline int block_bmap (struct buffer_head * bh, int nr)
67 int tmp;
69 if (!bh)
70 return 0;
71 tmp = le32_to_cpu(((u32 *) bh->b_data)[nr]);
72 brelse (bh);
73 return tmp;
/*
 * ext2_discard_prealloc and ext2_alloc_block are atomic wrt. the
 * superblock in the same manner as are ext2_free_blocks and
 * ext2_new_block.  We just wait on the super rather than locking it
 * here, since ext2_new_block will do the necessary locking and we
 * can't block until then.
 */
void ext2_discard_prealloc (struct inode * inode)
{
#ifdef EXT2_PREALLOCATE
	unsigned short total;

	if (inode->u.ext2_i.i_prealloc_count) {
		/* Zero the count before freeing so nobody can hand out
		 * a block from the window we are about to release. */
		total = inode->u.ext2_i.i_prealloc_count;
		inode->u.ext2_i.i_prealloc_count = 0;
		ext2_free_blocks (inode, inode->u.ext2_i.i_prealloc_block, total);
	}
#endif
}
96 static int ext2_alloc_block (struct inode * inode, unsigned long goal, int *err)
98 #ifdef EXT2FS_DEBUG
99 static unsigned long alloc_hits = 0, alloc_attempts = 0;
100 #endif
101 unsigned long result;
103 wait_on_super (inode->i_sb);
105 #ifdef EXT2_PREALLOCATE
106 if (inode->u.ext2_i.i_prealloc_count &&
107 (goal == inode->u.ext2_i.i_prealloc_block ||
108 goal + 1 == inode->u.ext2_i.i_prealloc_block))
110 result = inode->u.ext2_i.i_prealloc_block++;
111 inode->u.ext2_i.i_prealloc_count--;
112 ext2_debug ("preallocation hit (%lu/%lu).\n",
113 ++alloc_hits, ++alloc_attempts);
115 } else {
116 ext2_discard_prealloc (inode);
117 ext2_debug ("preallocation miss (%lu/%lu).\n",
118 alloc_hits, ++alloc_attempts);
119 if (S_ISREG(inode->i_mode))
120 result = ext2_new_block (inode, goal,
121 &inode->u.ext2_i.i_prealloc_count,
122 &inode->u.ext2_i.i_prealloc_block, err);
123 else
124 result = ext2_new_block (inode, goal, 0, 0, err);
126 #else
127 result = ext2_new_block (inode, goal, 0, 0, err);
128 #endif
129 return result;
133 int ext2_bmap (struct inode * inode, int block)
135 int i, ret;
136 int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
137 int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
139 ret = 0;
140 lock_kernel();
141 if (block < 0) {
142 ext2_warning (inode->i_sb, "ext2_bmap", "block < 0");
143 goto out;
145 if (block >= EXT2_NDIR_BLOCKS + addr_per_block +
146 (1 << (addr_per_block_bits * 2)) +
147 ((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
148 ext2_warning (inode->i_sb, "ext2_bmap", "block > big");
149 goto out;
151 if (block < EXT2_NDIR_BLOCKS) {
152 ret = inode_bmap (inode, block);
153 goto out;
155 block -= EXT2_NDIR_BLOCKS;
156 if (block < addr_per_block) {
157 i = inode_bmap (inode, EXT2_IND_BLOCK);
158 if (!i)
159 goto out;
160 ret = block_bmap (bread (inode->i_dev, i,
161 inode->i_sb->s_blocksize), block);
162 goto out;
164 block -= addr_per_block;
165 if (block < (1 << (addr_per_block_bits * 2))) {
166 i = inode_bmap (inode, EXT2_DIND_BLOCK);
167 if (!i)
168 goto out;
169 i = block_bmap (bread (inode->i_dev, i,
170 inode->i_sb->s_blocksize),
171 block >> addr_per_block_bits);
172 if (!i)
173 goto out;
174 ret = block_bmap (bread (inode->i_dev, i,
175 inode->i_sb->s_blocksize),
176 block & (addr_per_block - 1));
178 block -= (1 << (addr_per_block_bits * 2));
179 i = inode_bmap (inode, EXT2_TIND_BLOCK);
180 if (!i)
181 goto out;
182 i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
183 block >> (addr_per_block_bits * 2));
184 if (!i)
185 goto out;
186 i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
187 (block >> addr_per_block_bits) & (addr_per_block - 1));
188 if (!i)
189 goto out;
190 ret = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
191 block & (addr_per_block - 1));
192 out:
193 unlock_kernel();
194 return ret;
197 int ext2_bmap_create (struct inode * inode, int block)
199 int i;
200 int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
201 int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
203 if (block < 0) {
204 ext2_warning (inode->i_sb, "ext2_bmap", "block < 0");
205 return 0;
207 if (block >= EXT2_NDIR_BLOCKS + addr_per_block +
208 (1 << (addr_per_block_bits * 2)) +
209 ((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
210 ext2_warning (inode->i_sb, "ext2_bmap", "block > big");
211 return 0;
213 if (block < EXT2_NDIR_BLOCKS)
214 return inode_bmap (inode, block);
215 block -= EXT2_NDIR_BLOCKS;
216 if (block < addr_per_block) {
217 i = inode_bmap (inode, EXT2_IND_BLOCK);
218 if (!i)
219 return 0;
220 return block_bmap (bread (inode->i_dev, i,
221 inode->i_sb->s_blocksize), block);
223 block -= addr_per_block;
224 if (block < (1 << (addr_per_block_bits * 2))) {
225 i = inode_bmap (inode, EXT2_DIND_BLOCK);
226 if (!i)
227 return 0;
228 i = block_bmap (bread (inode->i_dev, i,
229 inode->i_sb->s_blocksize),
230 block >> addr_per_block_bits);
231 if (!i)
232 return 0;
233 return block_bmap (bread (inode->i_dev, i,
234 inode->i_sb->s_blocksize),
235 block & (addr_per_block - 1));
237 block -= (1 << (addr_per_block_bits * 2));
238 i = inode_bmap (inode, EXT2_TIND_BLOCK);
239 if (!i)
240 return 0;
241 i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
242 block >> (addr_per_block_bits * 2));
243 if (!i)
244 return 0;
245 i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
246 (block >> addr_per_block_bits) & (addr_per_block - 1));
247 if (!i)
248 return 0;
249 return block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
250 block & (addr_per_block - 1));
253 static struct buffer_head * inode_getblk (struct inode * inode, int nr,
254 int create, int new_block, int * err, int metadata,
255 int *phys_block, int *created)
257 u32 * p;
258 int tmp, goal = 0;
259 struct buffer_head * result;
260 int blocks = inode->i_sb->s_blocksize / 512;
262 p = inode->u.ext2_i.i_data + nr;
263 repeat:
264 tmp = le32_to_cpu(*p);
265 if (tmp) {
266 if (metadata) {
267 struct buffer_head * result = getblk (inode->i_dev, tmp, inode->i_sb->s_blocksize);
268 if (tmp == le32_to_cpu(*p))
269 return result;
270 brelse (result);
271 goto repeat;
272 } else {
273 *phys_block = tmp;
274 return NULL;
277 *err = -EFBIG;
278 if (!create)
279 goto dont_create;
281 /* Check file limits.. */
283 unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
284 if (limit < RLIM_INFINITY) {
285 limit >>= EXT2_BLOCK_SIZE_BITS(inode->i_sb);
286 if (new_block >= limit) {
287 send_sig(SIGXFSZ, current, 0);
288 dont_create:
289 *err = -EFBIG;
290 return NULL;
295 if (inode->u.ext2_i.i_next_alloc_block == new_block)
296 goal = inode->u.ext2_i.i_next_alloc_goal;
298 ext2_debug ("hint = %d,", goal);
300 if (!goal) {
301 for (tmp = nr - 1; tmp >= 0; tmp--) {
302 if (inode->u.ext2_i.i_data[tmp]) {
303 goal = le32_to_cpu(inode->u.ext2_i.i_data[tmp]);
304 break;
307 if (!goal)
308 goal = (inode->u.ext2_i.i_block_group *
309 EXT2_BLOCKS_PER_GROUP(inode->i_sb)) +
310 le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_first_data_block);
313 ext2_debug ("goal = %d.\n", goal);
315 tmp = ext2_alloc_block (inode, goal, err);
316 if (!tmp)
317 return NULL;
318 if (metadata) {
319 result = getblk (inode->i_dev, tmp, inode->i_sb->s_blocksize);
320 if (*p) {
321 ext2_free_blocks (inode, tmp, 1);
322 brelse (result);
323 goto repeat;
325 memset(result->b_data, 0, inode->i_sb->s_blocksize);
326 mark_buffer_uptodate(result, 1);
327 mark_buffer_dirty(result, 1);
328 } else {
329 if (*p) {
330 ext2_free_blocks (inode, tmp, 1);
331 goto repeat;
333 *phys_block = tmp;
334 result = NULL;
335 *err = 0;
336 *created = 1;
338 *p = cpu_to_le32(tmp);
340 inode->u.ext2_i.i_next_alloc_block = new_block;
341 inode->u.ext2_i.i_next_alloc_goal = tmp;
342 inode->i_ctime = CURRENT_TIME;
343 inode->i_blocks += blocks;
344 if (IS_SYNC(inode) || inode->u.ext2_i.i_osync)
345 ext2_sync_inode (inode);
346 else
347 mark_inode_dirty(inode);
348 return result;
352 * metadata / data
353 * possibly create / access
354 * can fail due to: - not present
355 * - out of space
357 * NULL return in the data case is mandatory.
359 static struct buffer_head * block_getblk (struct inode * inode,
360 struct buffer_head * bh, int nr, int create, int blocksize,
361 int new_block, int * err, int metadata, int *phys_block, int *created)
363 int tmp, goal = 0;
364 u32 * p;
365 struct buffer_head * result;
366 int blocks = inode->i_sb->s_blocksize / 512;
367 unsigned long limit;
369 if (!bh)
370 return NULL;
371 if (!buffer_uptodate(bh)) {
372 ll_rw_block (READ, 1, &bh);
373 wait_on_buffer (bh);
374 if (!buffer_uptodate(bh)) {
375 brelse (bh);
376 return NULL;
379 p = (u32 *) bh->b_data + nr;
380 repeat:
381 tmp = le32_to_cpu(*p);
382 if (tmp) {
383 if (metadata) {
384 result = getblk (bh->b_dev, tmp, blocksize);
385 if (tmp == le32_to_cpu(*p)) {
386 brelse (bh);
387 return result;
389 brelse (result);
390 goto repeat;
391 } else {
392 *phys_block = tmp;
393 brelse (bh);
394 return NULL;
397 *err = -EFBIG;
398 if (!create) {
399 brelse (bh);
400 return NULL;
403 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
404 if (limit < RLIM_INFINITY) {
405 limit >>= EXT2_BLOCK_SIZE_BITS(inode->i_sb);
406 if (new_block >= limit) {
407 brelse (bh);
408 send_sig(SIGXFSZ, current, 0);
409 return NULL;
413 if (inode->u.ext2_i.i_next_alloc_block == new_block)
414 goal = inode->u.ext2_i.i_next_alloc_goal;
415 if (!goal) {
416 for (tmp = nr - 1; tmp >= 0; tmp--) {
417 if (le32_to_cpu(((u32 *) bh->b_data)[tmp])) {
418 goal = le32_to_cpu(((u32 *)bh->b_data)[tmp]);
419 break;
422 if (!goal)
423 goal = bh->b_blocknr;
425 tmp = ext2_alloc_block (inode, goal, err);
426 if (!tmp) {
427 brelse (bh);
428 return NULL;
430 if (metadata) {
431 result = getblk (bh->b_dev, tmp, blocksize);
432 if (*p) {
433 ext2_free_blocks (inode, tmp, 1);
434 brelse (result);
435 goto repeat;
437 memset(result->b_data, 0, inode->i_sb->s_blocksize);
438 mark_buffer_uptodate(result, 1);
439 mark_buffer_dirty(result, 1);
440 } else {
441 *phys_block = tmp;
442 result = NULL;
443 *err = 0;
444 *created = 1;
446 if (le32_to_cpu(*p)) {
447 ext2_free_blocks (inode, tmp, 1);
448 brelse (result);
449 goto repeat;
451 *p = le32_to_cpu(tmp);
452 mark_buffer_dirty(bh, 1);
453 if (IS_SYNC(inode) || inode->u.ext2_i.i_osync) {
454 ll_rw_block (WRITE, 1, &bh);
455 wait_on_buffer (bh);
457 inode->i_ctime = CURRENT_TIME;
458 inode->i_blocks += blocks;
459 mark_inode_dirty(inode);
460 inode->u.ext2_i.i_next_alloc_block = new_block;
461 inode->u.ext2_i.i_next_alloc_goal = tmp;
462 brelse (bh);
463 return result;
466 int ext2_getblk_block (struct inode * inode, long block,
467 int create, int * err, int * created)
469 struct buffer_head * bh, *tmp;
470 unsigned long b;
471 unsigned long addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
472 int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
473 int phys_block, ret;
475 lock_kernel();
476 ret = 0;
477 *err = -EIO;
478 if (block < 0) {
479 ext2_warning (inode->i_sb, "ext2_getblk", "block < 0");
480 goto abort;
482 if (block > EXT2_NDIR_BLOCKS + addr_per_block +
483 (1 << (addr_per_block_bits * 2)) +
484 ((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
485 ext2_warning (inode->i_sb, "ext2_getblk", "block > big");
486 goto abort;
489 * If this is a sequential block allocation, set the next_alloc_block
490 * to this block now so that all the indblock and data block
491 * allocations use the same goal zone
494 ext2_debug ("block %lu, next %lu, goal %lu.\n", block,
495 inode->u.ext2_i.i_next_alloc_block,
496 inode->u.ext2_i.i_next_alloc_goal);
498 if (block == inode->u.ext2_i.i_next_alloc_block + 1) {
499 inode->u.ext2_i.i_next_alloc_block++;
500 inode->u.ext2_i.i_next_alloc_goal++;
503 *err = 0; // -ENOSPC;
504 b = block;
505 *created = 0;
506 if (block < EXT2_NDIR_BLOCKS) {
508 * data page.
510 tmp = inode_getblk (inode, block, create, b,
511 err, 0, &phys_block, created);
512 goto out;
514 block -= EXT2_NDIR_BLOCKS;
515 if (block < addr_per_block) {
516 bh = inode_getblk (inode, EXT2_IND_BLOCK, create, b, err, 1, NULL, NULL);
517 tmp = block_getblk (inode, bh, block, create,
518 inode->i_sb->s_blocksize, b, err, 0, &phys_block, created);
519 goto out;
521 block -= addr_per_block;
522 if (block < (1 << (addr_per_block_bits * 2))) {
523 bh = inode_getblk (inode, EXT2_DIND_BLOCK, create, b, err, 1, NULL, NULL);
524 bh = block_getblk (inode, bh, block >> addr_per_block_bits,
525 create, inode->i_sb->s_blocksize, b, err, 1, NULL, NULL);
526 tmp = block_getblk (inode, bh, block & (addr_per_block - 1),
527 create, inode->i_sb->s_blocksize, b, err, 0, &phys_block, created);
528 goto out;
530 block -= (1 << (addr_per_block_bits * 2));
531 bh = inode_getblk (inode, EXT2_TIND_BLOCK, create, b, err, 1, NULL,NULL);
532 bh = block_getblk (inode, bh, block >> (addr_per_block_bits * 2),
533 create, inode->i_sb->s_blocksize, b, err, 1, NULL,NULL);
534 bh = block_getblk (inode, bh, (block >> addr_per_block_bits) &
535 (addr_per_block - 1), create, inode->i_sb->s_blocksize,
536 b, err, 1, NULL,NULL);
537 tmp = block_getblk (inode, bh, block & (addr_per_block - 1), create,
538 inode->i_sb->s_blocksize, b, err, 0, &phys_block, created);
540 out:
541 if (!phys_block)
542 goto abort;
543 if (*err)
544 goto abort;
545 ret = phys_block;
546 abort:
547 unlock_kernel();
548 return ret;
551 struct buffer_head * ext2_getblk (struct inode * inode, long block,
552 int create, int * err)
554 struct buffer_head *tmp = NULL;
555 int phys_block;
556 int created;
558 phys_block = ext2_getblk_block (inode, block, create, err, &created);
560 if (phys_block) {
561 tmp = getblk (inode->i_dev, phys_block, inode->i_sb->s_blocksize);
562 if (created) {
563 memset(tmp->b_data, 0, inode->i_sb->s_blocksize);
564 mark_buffer_uptodate(tmp, 1);
565 mark_buffer_dirty(tmp, 1);
568 return tmp;
571 struct buffer_head * ext2_bread (struct inode * inode, int block,
572 int create, int *err)
574 struct buffer_head * bh;
575 int prev_blocks;
577 prev_blocks = inode->i_blocks;
579 bh = ext2_getblk (inode, block, create, err);
580 if (!bh)
581 return bh;
584 * If the inode has grown, and this is a directory, then perform
585 * preallocation of a few more blocks to try to keep directory
586 * fragmentation down.
588 if (create &&
589 S_ISDIR(inode->i_mode) &&
590 inode->i_blocks > prev_blocks &&
591 EXT2_HAS_COMPAT_FEATURE(inode->i_sb,
592 EXT2_FEATURE_COMPAT_DIR_PREALLOC)) {
593 int i;
594 struct buffer_head *tmp_bh;
596 for (i = 1;
597 i < EXT2_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
598 i++) {
600 * ext2_getblk will zero out the contents of the
601 * directory for us
603 tmp_bh = ext2_getblk(inode, block+i, create, err);
604 if (!tmp_bh) {
605 brelse (bh);
606 return 0;
608 brelse (tmp_bh);
612 if (buffer_uptodate(bh))
613 return bh;
614 ll_rw_block (READ, 1, &bh);
615 wait_on_buffer (bh);
616 if (buffer_uptodate(bh))
617 return bh;
618 brelse (bh);
619 *err = -EIO;
620 return NULL;
623 void ext2_read_inode (struct inode * inode)
625 struct buffer_head * bh;
626 struct ext2_inode * raw_inode;
627 unsigned long block_group;
628 unsigned long group_desc;
629 unsigned long desc;
630 unsigned long block;
631 unsigned long offset;
632 struct ext2_group_desc * gdp;
634 if ((inode->i_ino != EXT2_ROOT_INO && inode->i_ino != EXT2_ACL_IDX_INO &&
635 inode->i_ino != EXT2_ACL_DATA_INO &&
636 inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
637 inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
638 ext2_error (inode->i_sb, "ext2_read_inode",
639 "bad inode number: %lu", inode->i_ino);
640 goto bad_inode;
642 block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
643 if (block_group >= inode->i_sb->u.ext2_sb.s_groups_count) {
644 ext2_error (inode->i_sb, "ext2_read_inode",
645 "group >= groups count");
646 goto bad_inode;
648 group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
649 desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
650 bh = inode->i_sb->u.ext2_sb.s_group_desc[group_desc];
651 if (!bh) {
652 ext2_error (inode->i_sb, "ext2_read_inode",
653 "Descriptor not loaded");
654 goto bad_inode;
657 gdp = (struct ext2_group_desc *) bh->b_data;
659 * Figure out the offset within the block group inode table
661 offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
662 EXT2_INODE_SIZE(inode->i_sb);
663 block = le32_to_cpu(gdp[desc].bg_inode_table) +
664 (offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
665 if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
666 ext2_error (inode->i_sb, "ext2_read_inode",
667 "unable to read inode block - "
668 "inode=%lu, block=%lu", inode->i_ino, block);
669 goto bad_inode;
671 offset &= (EXT2_BLOCK_SIZE(inode->i_sb) - 1);
672 raw_inode = (struct ext2_inode *) (bh->b_data + offset);
674 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
675 inode->i_uid = le16_to_cpu(raw_inode->i_uid);
676 inode->i_gid = le16_to_cpu(raw_inode->i_gid);
677 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
678 inode->i_size = le32_to_cpu(raw_inode->i_size);
679 inode->i_atime = le32_to_cpu(raw_inode->i_atime);
680 inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
681 inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
682 inode->u.ext2_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
683 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
684 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
685 inode->i_version = ++event;
686 inode->u.ext2_i.i_new_inode = 0;
687 inode->u.ext2_i.i_flags = le32_to_cpu(raw_inode->i_flags);
688 inode->u.ext2_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
689 inode->u.ext2_i.i_frag_no = raw_inode->i_frag;
690 inode->u.ext2_i.i_frag_size = raw_inode->i_fsize;
691 inode->u.ext2_i.i_osync = 0;
692 inode->u.ext2_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
693 if (S_ISDIR(inode->i_mode))
694 inode->u.ext2_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
695 else {
696 inode->u.ext2_i.i_dir_acl = 0;
697 inode->u.ext2_i.i_high_size =
698 le32_to_cpu(raw_inode->i_size_high);
699 #if BITS_PER_LONG < 64
700 if (raw_inode->i_size_high)
701 inode->i_size = (__u32)-1;
702 #else
703 inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high))
704 << 32;
705 #endif
707 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
708 inode->u.ext2_i.i_block_group = block_group;
709 inode->u.ext2_i.i_next_alloc_block = 0;
710 inode->u.ext2_i.i_next_alloc_goal = 0;
711 if (inode->u.ext2_i.i_prealloc_count)
712 ext2_error (inode->i_sb, "ext2_read_inode",
713 "New inode has non-zero prealloc count!");
716 * NOTE! The in-memory inode i_blocks array is in little-endian order
717 * even on big-endian machines: we do NOT byteswap the block numbers!
719 for (block = 0; block < EXT2_N_BLOCKS; block++)
720 inode->u.ext2_i.i_data[block] = raw_inode->i_block[block];
722 if (inode->i_ino == EXT2_ACL_IDX_INO ||
723 inode->i_ino == EXT2_ACL_DATA_INO)
724 /* Nothing to do */ ;
725 else if (S_ISREG(inode->i_mode))
726 inode->i_op = &ext2_file_inode_operations;
727 else if (S_ISDIR(inode->i_mode))
728 inode->i_op = &ext2_dir_inode_operations;
729 else if (S_ISLNK(inode->i_mode))
730 inode->i_op = &ext2_symlink_inode_operations;
731 else
732 init_special_inode(inode, inode->i_mode,
733 le32_to_cpu(raw_inode->i_block[0]));
734 brelse (bh);
735 inode->i_attr_flags = 0;
736 if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL) {
737 inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS;
738 inode->i_flags |= MS_SYNCHRONOUS;
740 if (inode->u.ext2_i.i_flags & EXT2_APPEND_FL) {
741 inode->i_attr_flags |= ATTR_FLAG_APPEND;
742 inode->i_flags |= S_APPEND;
744 if (inode->u.ext2_i.i_flags & EXT2_IMMUTABLE_FL) {
745 inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE;
746 inode->i_flags |= S_IMMUTABLE;
748 if (inode->u.ext2_i.i_flags & EXT2_NOATIME_FL) {
749 inode->i_attr_flags |= ATTR_FLAG_NOATIME;
750 inode->i_flags |= MS_NOATIME;
752 return;
754 bad_inode:
755 make_bad_inode(inode);
756 return;
759 static int ext2_update_inode(struct inode * inode, int do_sync)
761 struct buffer_head * bh;
762 struct ext2_inode * raw_inode;
763 unsigned long block_group;
764 unsigned long group_desc;
765 unsigned long desc;
766 unsigned long block;
767 unsigned long offset;
768 int err = 0;
769 struct ext2_group_desc * gdp;
771 if ((inode->i_ino != EXT2_ROOT_INO &&
772 inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
773 inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
774 ext2_error (inode->i_sb, "ext2_write_inode",
775 "bad inode number: %lu", inode->i_ino);
776 return -EIO;
778 block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
779 if (block_group >= inode->i_sb->u.ext2_sb.s_groups_count) {
780 ext2_error (inode->i_sb, "ext2_write_inode",
781 "group >= groups count");
782 return -EIO;
784 group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
785 desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
786 bh = inode->i_sb->u.ext2_sb.s_group_desc[group_desc];
787 if (!bh) {
788 ext2_error (inode->i_sb, "ext2_write_inode",
789 "Descriptor not loaded");
790 return -EIO;
792 gdp = (struct ext2_group_desc *) bh->b_data;
794 * Figure out the offset within the block group inode table
796 offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
797 EXT2_INODE_SIZE(inode->i_sb);
798 block = le32_to_cpu(gdp[desc].bg_inode_table) +
799 (offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
800 if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
801 ext2_error (inode->i_sb, "ext2_write_inode",
802 "unable to read inode block - "
803 "inode=%lu, block=%lu", inode->i_ino, block);
804 return -EIO;
806 offset &= EXT2_BLOCK_SIZE(inode->i_sb) - 1;
807 raw_inode = (struct ext2_inode *) (bh->b_data + offset);
809 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
810 raw_inode->i_uid = cpu_to_le16(inode->i_uid);
811 raw_inode->i_gid = cpu_to_le16(inode->i_gid);
812 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
813 raw_inode->i_size = cpu_to_le32(inode->i_size);
814 raw_inode->i_atime = cpu_to_le32(inode->i_atime);
815 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
816 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
817 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
818 raw_inode->i_dtime = cpu_to_le32(inode->u.ext2_i.i_dtime);
819 raw_inode->i_flags = cpu_to_le32(inode->u.ext2_i.i_flags);
820 raw_inode->i_faddr = cpu_to_le32(inode->u.ext2_i.i_faddr);
821 raw_inode->i_frag = inode->u.ext2_i.i_frag_no;
822 raw_inode->i_fsize = inode->u.ext2_i.i_frag_size;
823 raw_inode->i_file_acl = cpu_to_le32(inode->u.ext2_i.i_file_acl);
824 if (S_ISDIR(inode->i_mode))
825 raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext2_i.i_dir_acl);
826 else {
827 #if BITS_PER_LONG < 64
828 raw_inode->i_size_high =
829 cpu_to_le32(inode->u.ext2_i.i_high_size);
830 #else
831 raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
832 #endif
834 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
835 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
836 raw_inode->i_block[0] = cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
837 else for (block = 0; block < EXT2_N_BLOCKS; block++)
838 raw_inode->i_block[block] = inode->u.ext2_i.i_data[block];
839 mark_buffer_dirty(bh, 1);
840 if (do_sync) {
841 ll_rw_block (WRITE, 1, &bh);
842 wait_on_buffer (bh);
843 if (buffer_req(bh) && !buffer_uptodate(bh)) {
844 printk ("IO error syncing ext2 inode ["
845 "%s:%08lx]\n",
846 bdevname(inode->i_dev), inode->i_ino);
847 err = -EIO;
850 brelse (bh);
851 return err;
/*
 * VFS write_inode callback: asynchronous writeback of the inode.
 */
void ext2_write_inode (struct inode * inode)
{
	ext2_update_inode (inode, 0);
}
/*
 * Synchronously flush the inode to disk; returns 0 or -EIO.
 */
int ext2_sync_inode (struct inode *inode)
{
	return ext2_update_inode (inode, 1);
}
864 int ext2_notify_change(struct dentry *dentry, struct iattr *iattr)
866 struct inode *inode = dentry->d_inode;
867 int retval;
868 unsigned int flags;
870 retval = -EPERM;
871 if ((iattr->ia_attr_flags &
872 (ATTR_FLAG_APPEND | ATTR_FLAG_IMMUTABLE)) ^
873 (inode->u.ext2_i.i_flags &
874 (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL))) {
875 if (!capable(CAP_LINUX_IMMUTABLE))
876 goto out;
877 } else if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
878 goto out;
880 retval = inode_change_ok(inode, iattr);
881 if (retval != 0)
882 goto out;
884 inode_setattr(inode, iattr);
886 flags = iattr->ia_attr_flags;
887 if (flags & ATTR_FLAG_SYNCRONOUS) {
888 inode->i_flags |= MS_SYNCHRONOUS;
889 inode->u.ext2_i.i_flags = EXT2_SYNC_FL;
890 } else {
891 inode->i_flags &= ~MS_SYNCHRONOUS;
892 inode->u.ext2_i.i_flags &= ~EXT2_SYNC_FL;
894 if (flags & ATTR_FLAG_NOATIME) {
895 inode->i_flags |= MS_NOATIME;
896 inode->u.ext2_i.i_flags = EXT2_NOATIME_FL;
897 } else {
898 inode->i_flags &= ~MS_NOATIME;
899 inode->u.ext2_i.i_flags &= ~EXT2_NOATIME_FL;
901 if (flags & ATTR_FLAG_APPEND) {
902 inode->i_flags |= S_APPEND;
903 inode->u.ext2_i.i_flags = EXT2_APPEND_FL;
904 } else {
905 inode->i_flags &= ~S_APPEND;
906 inode->u.ext2_i.i_flags &= ~EXT2_APPEND_FL;
908 if (flags & ATTR_FLAG_IMMUTABLE) {
909 inode->i_flags |= S_IMMUTABLE;
910 inode->u.ext2_i.i_flags = EXT2_IMMUTABLE_FL;
911 } else {
912 inode->i_flags &= ~S_IMMUTABLE;
913 inode->u.ext2_i.i_flags &= ~EXT2_IMMUTABLE_FL;
915 mark_inode_dirty(inode);
916 out:
917 return retval;