/*
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@dcs.ed.ac.uk), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */
23 #include <asm/uaccess.h>
24 #include <asm/system.h>
26 #include <linux/errno.h>
28 #include <linux/ext2_fs.h>
29 #include <linux/sched.h>
30 #include <linux/stat.h>
31 #include <linux/string.h>
32 #include <linux/locks.h>
35 static int ext2_update_inode(struct inode
* inode
, int do_sync
);
/*
 * Called at each iput(): throw away any blocks we had preallocated
 * for this inode but never used.
 */
void ext2_put_inode (struct inode * inode)
{
	ext2_discard_prealloc (inode);
}
46 * Called at the last iput() if i_nlink is zero.
48 void ext2_delete_inode (struct inode
* inode
)
50 if (inode
->i_ino
== EXT2_ACL_IDX_INO
||
51 inode
->i_ino
== EXT2_ACL_DATA_INO
)
53 inode
->u
.ext2_i
.i_dtime
= CURRENT_TIME
;
54 mark_inode_dirty(inode
);
55 ext2_update_inode(inode
, IS_SYNC(inode
));
58 ext2_truncate (inode
);
59 ext2_free_inode (inode
);
/* Physical block number stored in direct slot 'nr' of the in-core inode
 * (i_data is kept little-endian in memory, hence the le32_to_cpu). */
#define inode_bmap(inode, nr) (le32_to_cpu((inode)->u.ext2_i.i_data[(nr)]))
64 static inline int block_bmap (struct buffer_head
* bh
, int nr
)
70 tmp
= le32_to_cpu(((u32
*) bh
->b_data
)[nr
]);
/*
 * ext2_discard_prealloc and ext2_alloc_block are atomic wrt. the
 * superblock in the same manner as are ext2_free_blocks and
 * ext2_new_block.  We just wait on the super rather than locking it
 * here, since ext2_new_block will do the necessary locking and we
 * can't block until then.
 */
void ext2_discard_prealloc (struct inode * inode)
{
#ifdef EXT2_PREALLOCATE
	unsigned short total;

	if (inode->u.ext2_i.i_prealloc_count) {
		/* Zero the count first so a racing allocator can't hand
		 * out one of the blocks we are about to free. */
		total = inode->u.ext2_i.i_prealloc_count;
		inode->u.ext2_i.i_prealloc_count = 0;
		ext2_free_blocks (inode, inode->u.ext2_i.i_prealloc_block, total);
	}
#endif
}
95 static int ext2_alloc_block (struct inode
* inode
, unsigned long goal
, int *err
)
98 static unsigned long alloc_hits
= 0, alloc_attempts
= 0;
100 unsigned long result
;
102 wait_on_super (inode
->i_sb
);
104 #ifdef EXT2_PREALLOCATE
105 if (inode
->u
.ext2_i
.i_prealloc_count
&&
106 (goal
== inode
->u
.ext2_i
.i_prealloc_block
||
107 goal
+ 1 == inode
->u
.ext2_i
.i_prealloc_block
))
109 result
= inode
->u
.ext2_i
.i_prealloc_block
++;
110 inode
->u
.ext2_i
.i_prealloc_count
--;
111 ext2_debug ("preallocation hit (%lu/%lu).\n",
112 ++alloc_hits
, ++alloc_attempts
);
115 ext2_discard_prealloc (inode
);
116 ext2_debug ("preallocation miss (%lu/%lu).\n",
117 alloc_hits
, ++alloc_attempts
);
118 if (S_ISREG(inode
->i_mode
))
119 result
= ext2_new_block (inode
, goal
,
120 &inode
->u
.ext2_i
.i_prealloc_count
,
121 &inode
->u
.ext2_i
.i_prealloc_block
, err
);
123 result
= ext2_new_block (inode
, goal
, 0, 0, err
);
126 result
= ext2_new_block (inode
, goal
, 0, 0, err
);
132 int ext2_bmap (struct inode
* inode
, int block
)
135 int addr_per_block
= EXT2_ADDR_PER_BLOCK(inode
->i_sb
);
136 int addr_per_block_bits
= EXT2_ADDR_PER_BLOCK_BITS(inode
->i_sb
);
139 ext2_warning (inode
->i_sb
, "ext2_bmap", "block < 0");
142 if (block
>= EXT2_NDIR_BLOCKS
+ addr_per_block
+
143 (1 << (addr_per_block_bits
* 2)) +
144 ((1 << (addr_per_block_bits
* 2)) << addr_per_block_bits
)) {
145 ext2_warning (inode
->i_sb
, "ext2_bmap", "block > big");
148 if (block
< EXT2_NDIR_BLOCKS
)
149 return inode_bmap (inode
, block
);
150 block
-= EXT2_NDIR_BLOCKS
;
151 if (block
< addr_per_block
) {
152 i
= inode_bmap (inode
, EXT2_IND_BLOCK
);
155 return block_bmap (bread (inode
->i_dev
, i
,
156 inode
->i_sb
->s_blocksize
), block
);
158 block
-= addr_per_block
;
159 if (block
< (1 << (addr_per_block_bits
* 2))) {
160 i
= inode_bmap (inode
, EXT2_DIND_BLOCK
);
163 i
= block_bmap (bread (inode
->i_dev
, i
,
164 inode
->i_sb
->s_blocksize
),
165 block
>> addr_per_block_bits
);
168 return block_bmap (bread (inode
->i_dev
, i
,
169 inode
->i_sb
->s_blocksize
),
170 block
& (addr_per_block
- 1));
172 block
-= (1 << (addr_per_block_bits
* 2));
173 i
= inode_bmap (inode
, EXT2_TIND_BLOCK
);
176 i
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
177 block
>> (addr_per_block_bits
* 2));
180 i
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
181 (block
>> addr_per_block_bits
) & (addr_per_block
- 1));
184 return block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
185 block
& (addr_per_block
- 1));
188 int ext2_bmap_create (struct inode
* inode
, int block
)
191 int addr_per_block
= EXT2_ADDR_PER_BLOCK(inode
->i_sb
);
192 int addr_per_block_bits
= EXT2_ADDR_PER_BLOCK_BITS(inode
->i_sb
);
195 ext2_warning (inode
->i_sb
, "ext2_bmap", "block < 0");
198 if (block
>= EXT2_NDIR_BLOCKS
+ addr_per_block
+
199 (1 << (addr_per_block_bits
* 2)) +
200 ((1 << (addr_per_block_bits
* 2)) << addr_per_block_bits
)) {
201 ext2_warning (inode
->i_sb
, "ext2_bmap", "block > big");
204 if (block
< EXT2_NDIR_BLOCKS
)
205 return inode_bmap (inode
, block
);
206 block
-= EXT2_NDIR_BLOCKS
;
207 if (block
< addr_per_block
) {
208 i
= inode_bmap (inode
, EXT2_IND_BLOCK
);
211 return block_bmap (bread (inode
->i_dev
, i
,
212 inode
->i_sb
->s_blocksize
), block
);
214 block
-= addr_per_block
;
215 if (block
< (1 << (addr_per_block_bits
* 2))) {
216 i
= inode_bmap (inode
, EXT2_DIND_BLOCK
);
219 i
= block_bmap (bread (inode
->i_dev
, i
,
220 inode
->i_sb
->s_blocksize
),
221 block
>> addr_per_block_bits
);
224 return block_bmap (bread (inode
->i_dev
, i
,
225 inode
->i_sb
->s_blocksize
),
226 block
& (addr_per_block
- 1));
228 block
-= (1 << (addr_per_block_bits
* 2));
229 i
= inode_bmap (inode
, EXT2_TIND_BLOCK
);
232 i
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
233 block
>> (addr_per_block_bits
* 2));
236 i
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
237 (block
>> addr_per_block_bits
) & (addr_per_block
- 1));
240 return block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
241 block
& (addr_per_block
- 1));
244 static struct buffer_head
* inode_getblk (struct inode
* inode
, int nr
,
245 int create
, int new_block
, int * err
, int metadata
,
246 int *phys_block
, int *created
)
250 struct buffer_head
* result
;
251 int blocks
= inode
->i_sb
->s_blocksize
/ 512;
253 p
= inode
->u
.ext2_i
.i_data
+ nr
;
255 tmp
= le32_to_cpu(*p
);
258 struct buffer_head
* result
= getblk (inode
->i_dev
, tmp
, inode
->i_sb
->s_blocksize
);
259 if (tmp
== le32_to_cpu(*p
))
272 /* Check file limits.. */
274 unsigned long limit
= current
->rlim
[RLIMIT_FSIZE
].rlim_cur
;
275 if (limit
< RLIM_INFINITY
) {
276 limit
>>= EXT2_BLOCK_SIZE_BITS(inode
->i_sb
);
277 if (new_block
>= limit
) {
278 send_sig(SIGXFSZ
, current
, 0);
286 if (inode
->u
.ext2_i
.i_next_alloc_block
== new_block
)
287 goal
= inode
->u
.ext2_i
.i_next_alloc_goal
;
289 ext2_debug ("hint = %d,", goal
);
292 for (tmp
= nr
- 1; tmp
>= 0; tmp
--) {
293 if (inode
->u
.ext2_i
.i_data
[tmp
]) {
294 goal
= le32_to_cpu(inode
->u
.ext2_i
.i_data
[tmp
]);
299 goal
= (inode
->u
.ext2_i
.i_block_group
*
300 EXT2_BLOCKS_PER_GROUP(inode
->i_sb
)) +
301 le32_to_cpu(inode
->i_sb
->u
.ext2_sb
.s_es
->s_first_data_block
);
304 ext2_debug ("goal = %d.\n", goal
);
306 tmp
= ext2_alloc_block (inode
, goal
, err
);
310 result
= getblk (inode
->i_dev
, tmp
, inode
->i_sb
->s_blocksize
);
312 ext2_free_blocks (inode
, tmp
, 1);
316 memset(result
->b_data
, 0, inode
->i_sb
->s_blocksize
);
317 mark_buffer_uptodate(result
, 1);
318 mark_buffer_dirty(result
, 1);
321 ext2_free_blocks (inode
, tmp
, 1);
329 *p
= cpu_to_le32(tmp
);
331 inode
->u
.ext2_i
.i_next_alloc_block
= new_block
;
332 inode
->u
.ext2_i
.i_next_alloc_goal
= tmp
;
333 inode
->i_ctime
= CURRENT_TIME
;
334 inode
->i_blocks
+= blocks
;
335 if (IS_SYNC(inode
) || inode
->u
.ext2_i
.i_osync
)
336 ext2_sync_inode (inode
);
338 mark_inode_dirty(inode
);
344 * possibly create / access
345 * can fail due to: - not present
348 * NULL return in the data case is mandatory.
350 static struct buffer_head
* block_getblk (struct inode
* inode
,
351 struct buffer_head
* bh
, int nr
, int create
, int blocksize
,
352 int new_block
, int * err
, int metadata
, int *phys_block
, int *created
)
356 struct buffer_head
* result
;
357 int blocks
= inode
->i_sb
->s_blocksize
/ 512;
362 if (!buffer_uptodate(bh
)) {
363 ll_rw_block (READ
, 1, &bh
);
365 if (!buffer_uptodate(bh
)) {
370 p
= (u32
*) bh
->b_data
+ nr
;
372 tmp
= le32_to_cpu(*p
);
375 result
= getblk (bh
->b_dev
, tmp
, blocksize
);
376 if (tmp
== le32_to_cpu(*p
)) {
394 limit
= current
->rlim
[RLIMIT_FSIZE
].rlim_cur
;
395 if (limit
< RLIM_INFINITY
) {
396 limit
>>= EXT2_BLOCK_SIZE_BITS(inode
->i_sb
);
397 if (new_block
>= limit
) {
399 send_sig(SIGXFSZ
, current
, 0);
404 if (inode
->u
.ext2_i
.i_next_alloc_block
== new_block
)
405 goal
= inode
->u
.ext2_i
.i_next_alloc_goal
;
407 for (tmp
= nr
- 1; tmp
>= 0; tmp
--) {
408 if (le32_to_cpu(((u32
*) bh
->b_data
)[tmp
])) {
409 goal
= le32_to_cpu(((u32
*)bh
->b_data
)[tmp
]);
414 goal
= bh
->b_blocknr
;
416 tmp
= ext2_alloc_block (inode
, goal
, err
);
422 result
= getblk (bh
->b_dev
, tmp
, blocksize
);
424 ext2_free_blocks (inode
, tmp
, 1);
428 memset(result
->b_data
, 0, inode
->i_sb
->s_blocksize
);
429 mark_buffer_uptodate(result
, 1);
430 mark_buffer_dirty(result
, 1);
437 if (le32_to_cpu(*p
)) {
438 ext2_free_blocks (inode
, tmp
, 1);
442 *p
= le32_to_cpu(tmp
);
443 mark_buffer_dirty(bh
, 1);
444 if (IS_SYNC(inode
) || inode
->u
.ext2_i
.i_osync
) {
445 ll_rw_block (WRITE
, 1, &bh
);
448 inode
->i_ctime
= CURRENT_TIME
;
449 inode
->i_blocks
+= blocks
;
450 mark_inode_dirty(inode
);
451 inode
->u
.ext2_i
.i_next_alloc_block
= new_block
;
452 inode
->u
.ext2_i
.i_next_alloc_goal
= tmp
;
457 int ext2_getblk_block (struct inode
* inode
, long block
,
458 int create
, int * err
, int * created
)
460 struct buffer_head
* bh
, *tmp
;
462 unsigned long addr_per_block
= EXT2_ADDR_PER_BLOCK(inode
->i_sb
);
463 int addr_per_block_bits
= EXT2_ADDR_PER_BLOCK_BITS(inode
->i_sb
);
468 ext2_warning (inode
->i_sb
, "ext2_getblk", "block < 0");
471 if (block
> EXT2_NDIR_BLOCKS
+ addr_per_block
+
472 (1 << (addr_per_block_bits
* 2)) +
473 ((1 << (addr_per_block_bits
* 2)) << addr_per_block_bits
)) {
474 ext2_warning (inode
->i_sb
, "ext2_getblk", "block > big");
478 * If this is a sequential block allocation, set the next_alloc_block
479 * to this block now so that all the indblock and data block
480 * allocations use the same goal zone
483 ext2_debug ("block %lu, next %lu, goal %lu.\n", block
,
484 inode
->u
.ext2_i
.i_next_alloc_block
,
485 inode
->u
.ext2_i
.i_next_alloc_goal
);
487 if (block
== inode
->u
.ext2_i
.i_next_alloc_block
+ 1) {
488 inode
->u
.ext2_i
.i_next_alloc_block
++;
489 inode
->u
.ext2_i
.i_next_alloc_goal
++;
492 *err
= 0; // -ENOSPC;
495 if (block
< EXT2_NDIR_BLOCKS
) {
499 tmp
= inode_getblk (inode
, block
, create
, b
,
500 err
, 0, &phys_block
, created
);
503 block
-= EXT2_NDIR_BLOCKS
;
504 if (block
< addr_per_block
) {
505 bh
= inode_getblk (inode
, EXT2_IND_BLOCK
, create
, b
, err
, 1, NULL
, NULL
);
506 tmp
= block_getblk (inode
, bh
, block
, create
,
507 inode
->i_sb
->s_blocksize
, b
, err
, 0, &phys_block
, created
);
510 block
-= addr_per_block
;
511 if (block
< (1 << (addr_per_block_bits
* 2))) {
512 bh
= inode_getblk (inode
, EXT2_DIND_BLOCK
, create
, b
, err
, 1, NULL
, NULL
);
513 bh
= block_getblk (inode
, bh
, block
>> addr_per_block_bits
,
514 create
, inode
->i_sb
->s_blocksize
, b
, err
, 1, NULL
, NULL
);
515 tmp
= block_getblk (inode
, bh
, block
& (addr_per_block
- 1),
516 create
, inode
->i_sb
->s_blocksize
, b
, err
, 0, &phys_block
, created
);
519 block
-= (1 << (addr_per_block_bits
* 2));
520 bh
= inode_getblk (inode
, EXT2_TIND_BLOCK
, create
, b
, err
, 1, NULL
,NULL
);
521 bh
= block_getblk (inode
, bh
, block
>> (addr_per_block_bits
* 2),
522 create
, inode
->i_sb
->s_blocksize
, b
, err
, 1, NULL
,NULL
);
523 bh
= block_getblk (inode
, bh
, (block
>> addr_per_block_bits
) &
524 (addr_per_block
- 1), create
, inode
->i_sb
->s_blocksize
,
525 b
, err
, 1, NULL
,NULL
);
526 tmp
= block_getblk (inode
, bh
, block
& (addr_per_block
- 1), create
,
527 inode
->i_sb
->s_blocksize
, b
, err
, 0, &phys_block
, created
);
539 struct buffer_head
* ext2_getblk (struct inode
* inode
, long block
,
540 int create
, int * err
)
542 struct buffer_head
*tmp
= NULL
;
546 phys_block
= ext2_getblk_block (inode
, block
, create
, err
, &created
);
549 tmp
= getblk (inode
->i_dev
, phys_block
, inode
->i_sb
->s_blocksize
);
551 memset(tmp
->b_data
, 0, inode
->i_sb
->s_blocksize
);
552 mark_buffer_uptodate(tmp
, 1);
553 mark_buffer_dirty(tmp
, 1);
559 struct buffer_head
* ext2_bread (struct inode
* inode
, int block
,
560 int create
, int *err
)
562 struct buffer_head
* bh
;
565 prev_blocks
= inode
->i_blocks
;
567 bh
= ext2_getblk (inode
, block
, create
, err
);
572 * If the inode has grown, and this is a directory, then perform
573 * preallocation of a few more blocks to try to keep directory
574 * fragmentation down.
577 S_ISDIR(inode
->i_mode
) &&
578 inode
->i_blocks
> prev_blocks
&&
579 EXT2_HAS_COMPAT_FEATURE(inode
->i_sb
,
580 EXT2_FEATURE_COMPAT_DIR_PREALLOC
)) {
582 struct buffer_head
*tmp_bh
;
585 i
< EXT2_SB(inode
->i_sb
)->s_es
->s_prealloc_dir_blocks
;
588 * ext2_getblk will zero out the contents of the
591 tmp_bh
= ext2_getblk(inode
, block
+i
, create
, err
);
600 if (buffer_uptodate(bh
))
602 ll_rw_block (READ
, 1, &bh
);
604 if (buffer_uptodate(bh
))
611 void ext2_read_inode (struct inode
* inode
)
613 struct buffer_head
* bh
;
614 struct ext2_inode
* raw_inode
;
615 unsigned long block_group
;
616 unsigned long group_desc
;
619 unsigned long offset
;
620 struct ext2_group_desc
* gdp
;
622 if ((inode
->i_ino
!= EXT2_ROOT_INO
&& inode
->i_ino
!= EXT2_ACL_IDX_INO
&&
623 inode
->i_ino
!= EXT2_ACL_DATA_INO
&&
624 inode
->i_ino
< EXT2_FIRST_INO(inode
->i_sb
)) ||
625 inode
->i_ino
> le32_to_cpu(inode
->i_sb
->u
.ext2_sb
.s_es
->s_inodes_count
)) {
626 ext2_error (inode
->i_sb
, "ext2_read_inode",
627 "bad inode number: %lu", inode
->i_ino
);
630 block_group
= (inode
->i_ino
- 1) / EXT2_INODES_PER_GROUP(inode
->i_sb
);
631 if (block_group
>= inode
->i_sb
->u
.ext2_sb
.s_groups_count
) {
632 ext2_error (inode
->i_sb
, "ext2_read_inode",
633 "group >= groups count");
636 group_desc
= block_group
>> EXT2_DESC_PER_BLOCK_BITS(inode
->i_sb
);
637 desc
= block_group
& (EXT2_DESC_PER_BLOCK(inode
->i_sb
) - 1);
638 bh
= inode
->i_sb
->u
.ext2_sb
.s_group_desc
[group_desc
];
640 ext2_error (inode
->i_sb
, "ext2_read_inode",
641 "Descriptor not loaded");
645 gdp
= (struct ext2_group_desc
*) bh
->b_data
;
647 * Figure out the offset within the block group inode table
649 offset
= ((inode
->i_ino
- 1) % EXT2_INODES_PER_GROUP(inode
->i_sb
)) *
650 EXT2_INODE_SIZE(inode
->i_sb
);
651 block
= le32_to_cpu(gdp
[desc
].bg_inode_table
) +
652 (offset
>> EXT2_BLOCK_SIZE_BITS(inode
->i_sb
));
653 if (!(bh
= bread (inode
->i_dev
, block
, inode
->i_sb
->s_blocksize
))) {
654 ext2_error (inode
->i_sb
, "ext2_read_inode",
655 "unable to read inode block - "
656 "inode=%lu, block=%lu", inode
->i_ino
, block
);
659 offset
&= (EXT2_BLOCK_SIZE(inode
->i_sb
) - 1);
660 raw_inode
= (struct ext2_inode
*) (bh
->b_data
+ offset
);
662 inode
->i_mode
= le16_to_cpu(raw_inode
->i_mode
);
663 inode
->i_uid
= le16_to_cpu(raw_inode
->i_uid
);
664 inode
->i_gid
= le16_to_cpu(raw_inode
->i_gid
);
665 inode
->i_nlink
= le16_to_cpu(raw_inode
->i_links_count
);
666 inode
->i_size
= le32_to_cpu(raw_inode
->i_size
);
667 inode
->i_atime
= le32_to_cpu(raw_inode
->i_atime
);
668 inode
->i_ctime
= le32_to_cpu(raw_inode
->i_ctime
);
669 inode
->i_mtime
= le32_to_cpu(raw_inode
->i_mtime
);
670 inode
->u
.ext2_i
.i_dtime
= le32_to_cpu(raw_inode
->i_dtime
);
671 inode
->i_blksize
= PAGE_SIZE
; /* This is the optimal IO size (for stat), not the fs block size */
672 inode
->i_blocks
= le32_to_cpu(raw_inode
->i_blocks
);
673 inode
->i_version
= ++event
;
674 inode
->u
.ext2_i
.i_new_inode
= 0;
675 inode
->u
.ext2_i
.i_flags
= le32_to_cpu(raw_inode
->i_flags
);
676 inode
->u
.ext2_i
.i_faddr
= le32_to_cpu(raw_inode
->i_faddr
);
677 inode
->u
.ext2_i
.i_frag_no
= raw_inode
->i_frag
;
678 inode
->u
.ext2_i
.i_frag_size
= raw_inode
->i_fsize
;
679 inode
->u
.ext2_i
.i_osync
= 0;
680 inode
->u
.ext2_i
.i_file_acl
= le32_to_cpu(raw_inode
->i_file_acl
);
681 if (S_ISDIR(inode
->i_mode
))
682 inode
->u
.ext2_i
.i_dir_acl
= le32_to_cpu(raw_inode
->i_dir_acl
);
684 inode
->u
.ext2_i
.i_dir_acl
= 0;
685 inode
->u
.ext2_i
.i_high_size
=
686 le32_to_cpu(raw_inode
->i_size_high
);
687 #if BITS_PER_LONG < 64
688 if (raw_inode
->i_size_high
)
689 inode
->i_size
= (__u32
)-1;
691 inode
->i_size
|= ((__u64
)le32_to_cpu(raw_inode
->i_size_high
))
695 inode
->i_generation
= le32_to_cpu(raw_inode
->i_generation
);
696 inode
->u
.ext2_i
.i_block_group
= block_group
;
697 inode
->u
.ext2_i
.i_next_alloc_block
= 0;
698 inode
->u
.ext2_i
.i_next_alloc_goal
= 0;
699 if (inode
->u
.ext2_i
.i_prealloc_count
)
700 ext2_error (inode
->i_sb
, "ext2_read_inode",
701 "New inode has non-zero prealloc count!");
704 * NOTE! The in-memory inode i_blocks array is in little-endian order
705 * even on big-endian machines: we do NOT byteswap the block numbers!
707 for (block
= 0; block
< EXT2_N_BLOCKS
; block
++)
708 inode
->u
.ext2_i
.i_data
[block
] = raw_inode
->i_block
[block
];
710 if (inode
->i_ino
== EXT2_ACL_IDX_INO
||
711 inode
->i_ino
== EXT2_ACL_DATA_INO
)
712 /* Nothing to do */ ;
713 else if (S_ISREG(inode
->i_mode
))
714 inode
->i_op
= &ext2_file_inode_operations
;
715 else if (S_ISDIR(inode
->i_mode
))
716 inode
->i_op
= &ext2_dir_inode_operations
;
717 else if (S_ISLNK(inode
->i_mode
))
718 inode
->i_op
= &ext2_symlink_inode_operations
;
720 init_special_inode(inode
, inode
->i_mode
,
721 le32_to_cpu(raw_inode
->i_block
[0]));
723 inode
->i_attr_flags
= 0;
724 if (inode
->u
.ext2_i
.i_flags
& EXT2_SYNC_FL
) {
725 inode
->i_attr_flags
|= ATTR_FLAG_SYNCRONOUS
;
726 inode
->i_flags
|= MS_SYNCHRONOUS
;
728 if (inode
->u
.ext2_i
.i_flags
& EXT2_APPEND_FL
) {
729 inode
->i_attr_flags
|= ATTR_FLAG_APPEND
;
730 inode
->i_flags
|= S_APPEND
;
732 if (inode
->u
.ext2_i
.i_flags
& EXT2_IMMUTABLE_FL
) {
733 inode
->i_attr_flags
|= ATTR_FLAG_IMMUTABLE
;
734 inode
->i_flags
|= S_IMMUTABLE
;
736 if (inode
->u
.ext2_i
.i_flags
& EXT2_NOATIME_FL
) {
737 inode
->i_attr_flags
|= ATTR_FLAG_NOATIME
;
738 inode
->i_flags
|= MS_NOATIME
;
743 make_bad_inode(inode
);
747 static int ext2_update_inode(struct inode
* inode
, int do_sync
)
749 struct buffer_head
* bh
;
750 struct ext2_inode
* raw_inode
;
751 unsigned long block_group
;
752 unsigned long group_desc
;
755 unsigned long offset
;
757 struct ext2_group_desc
* gdp
;
759 if ((inode
->i_ino
!= EXT2_ROOT_INO
&&
760 inode
->i_ino
< EXT2_FIRST_INO(inode
->i_sb
)) ||
761 inode
->i_ino
> le32_to_cpu(inode
->i_sb
->u
.ext2_sb
.s_es
->s_inodes_count
)) {
762 ext2_error (inode
->i_sb
, "ext2_write_inode",
763 "bad inode number: %lu", inode
->i_ino
);
766 block_group
= (inode
->i_ino
- 1) / EXT2_INODES_PER_GROUP(inode
->i_sb
);
767 if (block_group
>= inode
->i_sb
->u
.ext2_sb
.s_groups_count
) {
768 ext2_error (inode
->i_sb
, "ext2_write_inode",
769 "group >= groups count");
772 group_desc
= block_group
>> EXT2_DESC_PER_BLOCK_BITS(inode
->i_sb
);
773 desc
= block_group
& (EXT2_DESC_PER_BLOCK(inode
->i_sb
) - 1);
774 bh
= inode
->i_sb
->u
.ext2_sb
.s_group_desc
[group_desc
];
776 ext2_error (inode
->i_sb
, "ext2_write_inode",
777 "Descriptor not loaded");
780 gdp
= (struct ext2_group_desc
*) bh
->b_data
;
782 * Figure out the offset within the block group inode table
784 offset
= ((inode
->i_ino
- 1) % EXT2_INODES_PER_GROUP(inode
->i_sb
)) *
785 EXT2_INODE_SIZE(inode
->i_sb
);
786 block
= le32_to_cpu(gdp
[desc
].bg_inode_table
) +
787 (offset
>> EXT2_BLOCK_SIZE_BITS(inode
->i_sb
));
788 if (!(bh
= bread (inode
->i_dev
, block
, inode
->i_sb
->s_blocksize
))) {
789 ext2_error (inode
->i_sb
, "ext2_write_inode",
790 "unable to read inode block - "
791 "inode=%lu, block=%lu", inode
->i_ino
, block
);
794 offset
&= EXT2_BLOCK_SIZE(inode
->i_sb
) - 1;
795 raw_inode
= (struct ext2_inode
*) (bh
->b_data
+ offset
);
797 raw_inode
->i_mode
= cpu_to_le16(inode
->i_mode
);
798 raw_inode
->i_uid
= cpu_to_le16(inode
->i_uid
);
799 raw_inode
->i_gid
= cpu_to_le16(inode
->i_gid
);
800 raw_inode
->i_links_count
= cpu_to_le16(inode
->i_nlink
);
801 raw_inode
->i_size
= cpu_to_le32(inode
->i_size
);
802 raw_inode
->i_atime
= cpu_to_le32(inode
->i_atime
);
803 raw_inode
->i_ctime
= cpu_to_le32(inode
->i_ctime
);
804 raw_inode
->i_mtime
= cpu_to_le32(inode
->i_mtime
);
805 raw_inode
->i_blocks
= cpu_to_le32(inode
->i_blocks
);
806 raw_inode
->i_dtime
= cpu_to_le32(inode
->u
.ext2_i
.i_dtime
);
807 raw_inode
->i_flags
= cpu_to_le32(inode
->u
.ext2_i
.i_flags
);
808 raw_inode
->i_faddr
= cpu_to_le32(inode
->u
.ext2_i
.i_faddr
);
809 raw_inode
->i_frag
= inode
->u
.ext2_i
.i_frag_no
;
810 raw_inode
->i_fsize
= inode
->u
.ext2_i
.i_frag_size
;
811 raw_inode
->i_file_acl
= cpu_to_le32(inode
->u
.ext2_i
.i_file_acl
);
812 if (S_ISDIR(inode
->i_mode
))
813 raw_inode
->i_dir_acl
= cpu_to_le32(inode
->u
.ext2_i
.i_dir_acl
);
815 #if BITS_PER_LONG < 64
816 raw_inode
->i_size_high
=
817 cpu_to_le32(inode
->u
.ext2_i
.i_high_size
);
819 raw_inode
->i_size_high
= cpu_to_le32(inode
->i_size
>> 32);
822 raw_inode
->i_generation
= cpu_to_le32(inode
->i_generation
);
823 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
))
824 raw_inode
->i_block
[0] = cpu_to_le32(kdev_t_to_nr(inode
->i_rdev
));
825 else for (block
= 0; block
< EXT2_N_BLOCKS
; block
++)
826 raw_inode
->i_block
[block
] = inode
->u
.ext2_i
.i_data
[block
];
827 mark_buffer_dirty(bh
, 1);
829 ll_rw_block (WRITE
, 1, &bh
);
831 if (buffer_req(bh
) && !buffer_uptodate(bh
)) {
832 printk ("IO error syncing ext2 inode ["
834 bdevname(inode
->i_dev
), inode
->i_ino
);
/* Asynchronous write-back entry point for the VFS. */
void ext2_write_inode (struct inode * inode)
{
	ext2_update_inode (inode, 0);
}
/* Synchronous write-back; returns -EIO on write failure, 0 otherwise. */
int ext2_sync_inode (struct inode *inode)
{
	return ext2_update_inode (inode, 1);
}
852 int ext2_notify_change(struct dentry
*dentry
, struct iattr
*iattr
)
854 struct inode
*inode
= dentry
->d_inode
;
859 if ((iattr
->ia_attr_flags
&
860 (ATTR_FLAG_APPEND
| ATTR_FLAG_IMMUTABLE
)) ^
861 (inode
->u
.ext2_i
.i_flags
&
862 (EXT2_APPEND_FL
| EXT2_IMMUTABLE_FL
))) {
863 if (!capable(CAP_LINUX_IMMUTABLE
))
865 } else if ((current
->fsuid
!= inode
->i_uid
) && !capable(CAP_FOWNER
))
868 retval
= inode_change_ok(inode
, iattr
);
872 inode_setattr(inode
, iattr
);
874 flags
= iattr
->ia_attr_flags
;
875 if (flags
& ATTR_FLAG_SYNCRONOUS
) {
876 inode
->i_flags
|= MS_SYNCHRONOUS
;
877 inode
->u
.ext2_i
.i_flags
= EXT2_SYNC_FL
;
879 inode
->i_flags
&= ~MS_SYNCHRONOUS
;
880 inode
->u
.ext2_i
.i_flags
&= ~EXT2_SYNC_FL
;
882 if (flags
& ATTR_FLAG_NOATIME
) {
883 inode
->i_flags
|= MS_NOATIME
;
884 inode
->u
.ext2_i
.i_flags
= EXT2_NOATIME_FL
;
886 inode
->i_flags
&= ~MS_NOATIME
;
887 inode
->u
.ext2_i
.i_flags
&= ~EXT2_NOATIME_FL
;
889 if (flags
& ATTR_FLAG_APPEND
) {
890 inode
->i_flags
|= S_APPEND
;
891 inode
->u
.ext2_i
.i_flags
= EXT2_APPEND_FL
;
893 inode
->i_flags
&= ~S_APPEND
;
894 inode
->u
.ext2_i
.i_flags
&= ~EXT2_APPEND_FL
;
896 if (flags
& ATTR_FLAG_IMMUTABLE
) {
897 inode
->i_flags
|= S_IMMUTABLE
;
898 inode
->u
.ext2_i
.i_flags
= EXT2_IMMUTABLE_FL
;
900 inode
->i_flags
&= ~S_IMMUTABLE
;
901 inode
->u
.ext2_i
.i_flags
&= ~EXT2_IMMUTABLE_FL
;
903 mark_inode_dirty(inode
);