2 * linux/fs/ext2/inode.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
11 * linux/fs/minix/inode.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@dcs.ed.ac.uk), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
23 #include <asm/uaccess.h>
24 #include <asm/system.h>
26 #include <linux/errno.h>
28 #include <linux/ext2_fs.h>
29 #include <linux/sched.h>
30 #include <linux/stat.h>
31 #include <linux/string.h>
32 #include <linux/locks.h>
34 #include <linux/smp_lock.h>
36 static int ext2_update_inode(struct inode
* inode
, int do_sync
);
/*
 * Called at each iput().
 *
 * Any blocks preallocated for this inode are given back to the block
 * allocator once the last in-core reference goes away.
 */
void ext2_put_inode (struct inode * inode)
{
	ext2_discard_prealloc (inode);
}
47 * Called at the last iput() if i_nlink is zero.
49 void ext2_delete_inode (struct inode
* inode
)
51 if (inode
->i_ino
== EXT2_ACL_IDX_INO
||
52 inode
->i_ino
== EXT2_ACL_DATA_INO
)
54 inode
->u
.ext2_i
.i_dtime
= CURRENT_TIME
;
55 mark_inode_dirty(inode
);
56 ext2_update_inode(inode
, IS_SYNC(inode
));
59 ext2_truncate (inode
);
60 ext2_free_inode (inode
);
/* Direct-block lookup: slot nr of the inode's in-core i_data[] array, which
   is kept in on-disk (little-endian) byte order even on big-endian hosts. */
#define inode_bmap(inode, nr) (le32_to_cpu((inode)->u.ext2_i.i_data[(nr)]))
65 static inline int block_bmap (struct buffer_head
* bh
, int nr
)
71 tmp
= le32_to_cpu(((u32
*) bh
->b_data
)[nr
]);
/*
 * ext2_discard_prealloc and ext2_alloc_block are atomic wrt. the
 * superblock in the same manner as are ext2_free_blocks and
 * ext2_new_block.  We just wait on the super rather than locking it
 * here, since ext2_new_block will do the necessary locking and we
 * can't block until then.
 */
void ext2_discard_prealloc (struct inode * inode)
{
#ifdef EXT2_PREALLOCATE
	unsigned short total;

	if (inode->u.ext2_i.i_prealloc_count) {
		total = inode->u.ext2_i.i_prealloc_count;
		/* Zero the count before freeing so a racing allocation
		   cannot hand out a block we are about to release. */
		inode->u.ext2_i.i_prealloc_count = 0;
		ext2_free_blocks (inode, inode->u.ext2_i.i_prealloc_block, total);
	}
#endif
}
96 static int ext2_alloc_block (struct inode
* inode
, unsigned long goal
, int *err
)
99 static unsigned long alloc_hits
= 0, alloc_attempts
= 0;
101 unsigned long result
;
103 wait_on_super (inode
->i_sb
);
105 #ifdef EXT2_PREALLOCATE
106 if (inode
->u
.ext2_i
.i_prealloc_count
&&
107 (goal
== inode
->u
.ext2_i
.i_prealloc_block
||
108 goal
+ 1 == inode
->u
.ext2_i
.i_prealloc_block
))
110 result
= inode
->u
.ext2_i
.i_prealloc_block
++;
111 inode
->u
.ext2_i
.i_prealloc_count
--;
112 ext2_debug ("preallocation hit (%lu/%lu).\n",
113 ++alloc_hits
, ++alloc_attempts
);
116 ext2_discard_prealloc (inode
);
117 ext2_debug ("preallocation miss (%lu/%lu).\n",
118 alloc_hits
, ++alloc_attempts
);
119 if (S_ISREG(inode
->i_mode
))
120 result
= ext2_new_block (inode
, goal
,
121 &inode
->u
.ext2_i
.i_prealloc_count
,
122 &inode
->u
.ext2_i
.i_prealloc_block
, err
);
124 result
= ext2_new_block (inode
, goal
, 0, 0, err
);
127 result
= ext2_new_block (inode
, goal
, 0, 0, err
);
133 int ext2_bmap (struct inode
* inode
, int block
)
136 int addr_per_block
= EXT2_ADDR_PER_BLOCK(inode
->i_sb
);
137 int addr_per_block_bits
= EXT2_ADDR_PER_BLOCK_BITS(inode
->i_sb
);
142 ext2_warning (inode
->i_sb
, "ext2_bmap", "block < 0");
145 if (block
>= EXT2_NDIR_BLOCKS
+ addr_per_block
+
146 (1 << (addr_per_block_bits
* 2)) +
147 ((1 << (addr_per_block_bits
* 2)) << addr_per_block_bits
)) {
148 ext2_warning (inode
->i_sb
, "ext2_bmap", "block > big");
151 if (block
< EXT2_NDIR_BLOCKS
) {
152 ret
= inode_bmap (inode
, block
);
155 block
-= EXT2_NDIR_BLOCKS
;
156 if (block
< addr_per_block
) {
157 i
= inode_bmap (inode
, EXT2_IND_BLOCK
);
160 ret
= block_bmap (bread (inode
->i_dev
, i
,
161 inode
->i_sb
->s_blocksize
), block
);
164 block
-= addr_per_block
;
165 if (block
< (1 << (addr_per_block_bits
* 2))) {
166 i
= inode_bmap (inode
, EXT2_DIND_BLOCK
);
169 i
= block_bmap (bread (inode
->i_dev
, i
,
170 inode
->i_sb
->s_blocksize
),
171 block
>> addr_per_block_bits
);
174 ret
= block_bmap (bread (inode
->i_dev
, i
,
175 inode
->i_sb
->s_blocksize
),
176 block
& (addr_per_block
- 1));
178 block
-= (1 << (addr_per_block_bits
* 2));
179 i
= inode_bmap (inode
, EXT2_TIND_BLOCK
);
182 i
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
183 block
>> (addr_per_block_bits
* 2));
186 i
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
187 (block
>> addr_per_block_bits
) & (addr_per_block
- 1));
190 ret
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
191 block
& (addr_per_block
- 1));
197 int ext2_bmap_create (struct inode
* inode
, int block
)
200 int addr_per_block
= EXT2_ADDR_PER_BLOCK(inode
->i_sb
);
201 int addr_per_block_bits
= EXT2_ADDR_PER_BLOCK_BITS(inode
->i_sb
);
204 ext2_warning (inode
->i_sb
, "ext2_bmap", "block < 0");
207 if (block
>= EXT2_NDIR_BLOCKS
+ addr_per_block
+
208 (1 << (addr_per_block_bits
* 2)) +
209 ((1 << (addr_per_block_bits
* 2)) << addr_per_block_bits
)) {
210 ext2_warning (inode
->i_sb
, "ext2_bmap", "block > big");
213 if (block
< EXT2_NDIR_BLOCKS
)
214 return inode_bmap (inode
, block
);
215 block
-= EXT2_NDIR_BLOCKS
;
216 if (block
< addr_per_block
) {
217 i
= inode_bmap (inode
, EXT2_IND_BLOCK
);
220 return block_bmap (bread (inode
->i_dev
, i
,
221 inode
->i_sb
->s_blocksize
), block
);
223 block
-= addr_per_block
;
224 if (block
< (1 << (addr_per_block_bits
* 2))) {
225 i
= inode_bmap (inode
, EXT2_DIND_BLOCK
);
228 i
= block_bmap (bread (inode
->i_dev
, i
,
229 inode
->i_sb
->s_blocksize
),
230 block
>> addr_per_block_bits
);
233 return block_bmap (bread (inode
->i_dev
, i
,
234 inode
->i_sb
->s_blocksize
),
235 block
& (addr_per_block
- 1));
237 block
-= (1 << (addr_per_block_bits
* 2));
238 i
= inode_bmap (inode
, EXT2_TIND_BLOCK
);
241 i
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
242 block
>> (addr_per_block_bits
* 2));
245 i
= block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
246 (block
>> addr_per_block_bits
) & (addr_per_block
- 1));
249 return block_bmap (bread (inode
->i_dev
, i
, inode
->i_sb
->s_blocksize
),
250 block
& (addr_per_block
- 1));
253 static struct buffer_head
* inode_getblk (struct inode
* inode
, int nr
,
254 int create
, int new_block
, int * err
, int metadata
,
255 int *phys_block
, int *created
)
259 struct buffer_head
* result
;
260 int blocks
= inode
->i_sb
->s_blocksize
/ 512;
262 p
= inode
->u
.ext2_i
.i_data
+ nr
;
264 tmp
= le32_to_cpu(*p
);
267 struct buffer_head
* result
= getblk (inode
->i_dev
, tmp
, inode
->i_sb
->s_blocksize
);
268 if (tmp
== le32_to_cpu(*p
))
281 /* Check file limits.. */
283 unsigned long limit
= current
->rlim
[RLIMIT_FSIZE
].rlim_cur
;
284 if (limit
< RLIM_INFINITY
) {
285 limit
>>= EXT2_BLOCK_SIZE_BITS(inode
->i_sb
);
286 if (new_block
>= limit
) {
287 send_sig(SIGXFSZ
, current
, 0);
295 if (inode
->u
.ext2_i
.i_next_alloc_block
== new_block
)
296 goal
= inode
->u
.ext2_i
.i_next_alloc_goal
;
298 ext2_debug ("hint = %d,", goal
);
301 for (tmp
= nr
- 1; tmp
>= 0; tmp
--) {
302 if (inode
->u
.ext2_i
.i_data
[tmp
]) {
303 goal
= le32_to_cpu(inode
->u
.ext2_i
.i_data
[tmp
]);
308 goal
= (inode
->u
.ext2_i
.i_block_group
*
309 EXT2_BLOCKS_PER_GROUP(inode
->i_sb
)) +
310 le32_to_cpu(inode
->i_sb
->u
.ext2_sb
.s_es
->s_first_data_block
);
313 ext2_debug ("goal = %d.\n", goal
);
315 tmp
= ext2_alloc_block (inode
, goal
, err
);
319 result
= getblk (inode
->i_dev
, tmp
, inode
->i_sb
->s_blocksize
);
321 ext2_free_blocks (inode
, tmp
, 1);
325 memset(result
->b_data
, 0, inode
->i_sb
->s_blocksize
);
326 mark_buffer_uptodate(result
, 1);
327 mark_buffer_dirty(result
, 1);
330 ext2_free_blocks (inode
, tmp
, 1);
338 *p
= cpu_to_le32(tmp
);
340 inode
->u
.ext2_i
.i_next_alloc_block
= new_block
;
341 inode
->u
.ext2_i
.i_next_alloc_goal
= tmp
;
342 inode
->i_ctime
= CURRENT_TIME
;
343 inode
->i_blocks
+= blocks
;
344 if (IS_SYNC(inode
) || inode
->u
.ext2_i
.i_osync
)
345 ext2_sync_inode (inode
);
347 mark_inode_dirty(inode
);
353 * possibly create / access
354 * can fail due to: - not present
357 * NULL return in the data case is mandatory.
359 static struct buffer_head
* block_getblk (struct inode
* inode
,
360 struct buffer_head
* bh
, int nr
, int create
, int blocksize
,
361 int new_block
, int * err
, int metadata
, int *phys_block
, int *created
)
365 struct buffer_head
* result
;
366 int blocks
= inode
->i_sb
->s_blocksize
/ 512;
371 if (!buffer_uptodate(bh
)) {
372 ll_rw_block (READ
, 1, &bh
);
374 if (!buffer_uptodate(bh
)) {
379 p
= (u32
*) bh
->b_data
+ nr
;
381 tmp
= le32_to_cpu(*p
);
384 result
= getblk (bh
->b_dev
, tmp
, blocksize
);
385 if (tmp
== le32_to_cpu(*p
)) {
403 limit
= current
->rlim
[RLIMIT_FSIZE
].rlim_cur
;
404 if (limit
< RLIM_INFINITY
) {
405 limit
>>= EXT2_BLOCK_SIZE_BITS(inode
->i_sb
);
406 if (new_block
>= limit
) {
408 send_sig(SIGXFSZ
, current
, 0);
413 if (inode
->u
.ext2_i
.i_next_alloc_block
== new_block
)
414 goal
= inode
->u
.ext2_i
.i_next_alloc_goal
;
416 for (tmp
= nr
- 1; tmp
>= 0; tmp
--) {
417 if (le32_to_cpu(((u32
*) bh
->b_data
)[tmp
])) {
418 goal
= le32_to_cpu(((u32
*)bh
->b_data
)[tmp
]);
423 goal
= bh
->b_blocknr
;
425 tmp
= ext2_alloc_block (inode
, goal
, err
);
431 result
= getblk (bh
->b_dev
, tmp
, blocksize
);
433 ext2_free_blocks (inode
, tmp
, 1);
437 memset(result
->b_data
, 0, inode
->i_sb
->s_blocksize
);
438 mark_buffer_uptodate(result
, 1);
439 mark_buffer_dirty(result
, 1);
446 if (le32_to_cpu(*p
)) {
447 ext2_free_blocks (inode
, tmp
, 1);
451 *p
= le32_to_cpu(tmp
);
452 mark_buffer_dirty(bh
, 1);
453 if (IS_SYNC(inode
) || inode
->u
.ext2_i
.i_osync
) {
454 ll_rw_block (WRITE
, 1, &bh
);
457 inode
->i_ctime
= CURRENT_TIME
;
458 inode
->i_blocks
+= blocks
;
459 mark_inode_dirty(inode
);
460 inode
->u
.ext2_i
.i_next_alloc_block
= new_block
;
461 inode
->u
.ext2_i
.i_next_alloc_goal
= tmp
;
466 int ext2_getblk_block (struct inode
* inode
, long block
,
467 int create
, int * err
, int * created
)
469 struct buffer_head
* bh
, *tmp
;
471 unsigned long addr_per_block
= EXT2_ADDR_PER_BLOCK(inode
->i_sb
);
472 int addr_per_block_bits
= EXT2_ADDR_PER_BLOCK_BITS(inode
->i_sb
);
479 ext2_warning (inode
->i_sb
, "ext2_getblk", "block < 0");
482 if (block
> EXT2_NDIR_BLOCKS
+ addr_per_block
+
483 (1 << (addr_per_block_bits
* 2)) +
484 ((1 << (addr_per_block_bits
* 2)) << addr_per_block_bits
)) {
485 ext2_warning (inode
->i_sb
, "ext2_getblk", "block > big");
489 * If this is a sequential block allocation, set the next_alloc_block
490 * to this block now so that all the indblock and data block
491 * allocations use the same goal zone
494 ext2_debug ("block %lu, next %lu, goal %lu.\n", block
,
495 inode
->u
.ext2_i
.i_next_alloc_block
,
496 inode
->u
.ext2_i
.i_next_alloc_goal
);
498 if (block
== inode
->u
.ext2_i
.i_next_alloc_block
+ 1) {
499 inode
->u
.ext2_i
.i_next_alloc_block
++;
500 inode
->u
.ext2_i
.i_next_alloc_goal
++;
503 *err
= 0; // -ENOSPC;
506 if (block
< EXT2_NDIR_BLOCKS
) {
510 tmp
= inode_getblk (inode
, block
, create
, b
,
511 err
, 0, &phys_block
, created
);
514 block
-= EXT2_NDIR_BLOCKS
;
515 if (block
< addr_per_block
) {
516 bh
= inode_getblk (inode
, EXT2_IND_BLOCK
, create
, b
, err
, 1, NULL
, NULL
);
517 tmp
= block_getblk (inode
, bh
, block
, create
,
518 inode
->i_sb
->s_blocksize
, b
, err
, 0, &phys_block
, created
);
521 block
-= addr_per_block
;
522 if (block
< (1 << (addr_per_block_bits
* 2))) {
523 bh
= inode_getblk (inode
, EXT2_DIND_BLOCK
, create
, b
, err
, 1, NULL
, NULL
);
524 bh
= block_getblk (inode
, bh
, block
>> addr_per_block_bits
,
525 create
, inode
->i_sb
->s_blocksize
, b
, err
, 1, NULL
, NULL
);
526 tmp
= block_getblk (inode
, bh
, block
& (addr_per_block
- 1),
527 create
, inode
->i_sb
->s_blocksize
, b
, err
, 0, &phys_block
, created
);
530 block
-= (1 << (addr_per_block_bits
* 2));
531 bh
= inode_getblk (inode
, EXT2_TIND_BLOCK
, create
, b
, err
, 1, NULL
,NULL
);
532 bh
= block_getblk (inode
, bh
, block
>> (addr_per_block_bits
* 2),
533 create
, inode
->i_sb
->s_blocksize
, b
, err
, 1, NULL
,NULL
);
534 bh
= block_getblk (inode
, bh
, (block
>> addr_per_block_bits
) &
535 (addr_per_block
- 1), create
, inode
->i_sb
->s_blocksize
,
536 b
, err
, 1, NULL
,NULL
);
537 tmp
= block_getblk (inode
, bh
, block
& (addr_per_block
- 1), create
,
538 inode
->i_sb
->s_blocksize
, b
, err
, 0, &phys_block
, created
);
551 struct buffer_head
* ext2_getblk (struct inode
* inode
, long block
,
552 int create
, int * err
)
554 struct buffer_head
*tmp
= NULL
;
558 phys_block
= ext2_getblk_block (inode
, block
, create
, err
, &created
);
561 tmp
= getblk (inode
->i_dev
, phys_block
, inode
->i_sb
->s_blocksize
);
563 memset(tmp
->b_data
, 0, inode
->i_sb
->s_blocksize
);
564 mark_buffer_uptodate(tmp
, 1);
565 mark_buffer_dirty(tmp
, 1);
571 struct buffer_head
* ext2_bread (struct inode
* inode
, int block
,
572 int create
, int *err
)
574 struct buffer_head
* bh
;
577 prev_blocks
= inode
->i_blocks
;
579 bh
= ext2_getblk (inode
, block
, create
, err
);
584 * If the inode has grown, and this is a directory, then perform
585 * preallocation of a few more blocks to try to keep directory
586 * fragmentation down.
589 S_ISDIR(inode
->i_mode
) &&
590 inode
->i_blocks
> prev_blocks
&&
591 EXT2_HAS_COMPAT_FEATURE(inode
->i_sb
,
592 EXT2_FEATURE_COMPAT_DIR_PREALLOC
)) {
594 struct buffer_head
*tmp_bh
;
597 i
< EXT2_SB(inode
->i_sb
)->s_es
->s_prealloc_dir_blocks
;
600 * ext2_getblk will zero out the contents of the
603 tmp_bh
= ext2_getblk(inode
, block
+i
, create
, err
);
612 if (buffer_uptodate(bh
))
614 ll_rw_block (READ
, 1, &bh
);
616 if (buffer_uptodate(bh
))
623 void ext2_read_inode (struct inode
* inode
)
625 struct buffer_head
* bh
;
626 struct ext2_inode
* raw_inode
;
627 unsigned long block_group
;
628 unsigned long group_desc
;
631 unsigned long offset
;
632 struct ext2_group_desc
* gdp
;
634 if ((inode
->i_ino
!= EXT2_ROOT_INO
&& inode
->i_ino
!= EXT2_ACL_IDX_INO
&&
635 inode
->i_ino
!= EXT2_ACL_DATA_INO
&&
636 inode
->i_ino
< EXT2_FIRST_INO(inode
->i_sb
)) ||
637 inode
->i_ino
> le32_to_cpu(inode
->i_sb
->u
.ext2_sb
.s_es
->s_inodes_count
)) {
638 ext2_error (inode
->i_sb
, "ext2_read_inode",
639 "bad inode number: %lu", inode
->i_ino
);
642 block_group
= (inode
->i_ino
- 1) / EXT2_INODES_PER_GROUP(inode
->i_sb
);
643 if (block_group
>= inode
->i_sb
->u
.ext2_sb
.s_groups_count
) {
644 ext2_error (inode
->i_sb
, "ext2_read_inode",
645 "group >= groups count");
648 group_desc
= block_group
>> EXT2_DESC_PER_BLOCK_BITS(inode
->i_sb
);
649 desc
= block_group
& (EXT2_DESC_PER_BLOCK(inode
->i_sb
) - 1);
650 bh
= inode
->i_sb
->u
.ext2_sb
.s_group_desc
[group_desc
];
652 ext2_error (inode
->i_sb
, "ext2_read_inode",
653 "Descriptor not loaded");
657 gdp
= (struct ext2_group_desc
*) bh
->b_data
;
659 * Figure out the offset within the block group inode table
661 offset
= ((inode
->i_ino
- 1) % EXT2_INODES_PER_GROUP(inode
->i_sb
)) *
662 EXT2_INODE_SIZE(inode
->i_sb
);
663 block
= le32_to_cpu(gdp
[desc
].bg_inode_table
) +
664 (offset
>> EXT2_BLOCK_SIZE_BITS(inode
->i_sb
));
665 if (!(bh
= bread (inode
->i_dev
, block
, inode
->i_sb
->s_blocksize
))) {
666 ext2_error (inode
->i_sb
, "ext2_read_inode",
667 "unable to read inode block - "
668 "inode=%lu, block=%lu", inode
->i_ino
, block
);
671 offset
&= (EXT2_BLOCK_SIZE(inode
->i_sb
) - 1);
672 raw_inode
= (struct ext2_inode
*) (bh
->b_data
+ offset
);
674 inode
->i_mode
= le16_to_cpu(raw_inode
->i_mode
);
675 inode
->i_uid
= le16_to_cpu(raw_inode
->i_uid
);
676 inode
->i_gid
= le16_to_cpu(raw_inode
->i_gid
);
677 inode
->i_nlink
= le16_to_cpu(raw_inode
->i_links_count
);
678 inode
->i_size
= le32_to_cpu(raw_inode
->i_size
);
679 inode
->i_atime
= le32_to_cpu(raw_inode
->i_atime
);
680 inode
->i_ctime
= le32_to_cpu(raw_inode
->i_ctime
);
681 inode
->i_mtime
= le32_to_cpu(raw_inode
->i_mtime
);
682 inode
->u
.ext2_i
.i_dtime
= le32_to_cpu(raw_inode
->i_dtime
);
683 inode
->i_blksize
= PAGE_SIZE
; /* This is the optimal IO size (for stat), not the fs block size */
684 inode
->i_blocks
= le32_to_cpu(raw_inode
->i_blocks
);
685 inode
->i_version
= ++event
;
686 inode
->u
.ext2_i
.i_new_inode
= 0;
687 inode
->u
.ext2_i
.i_flags
= le32_to_cpu(raw_inode
->i_flags
);
688 inode
->u
.ext2_i
.i_faddr
= le32_to_cpu(raw_inode
->i_faddr
);
689 inode
->u
.ext2_i
.i_frag_no
= raw_inode
->i_frag
;
690 inode
->u
.ext2_i
.i_frag_size
= raw_inode
->i_fsize
;
691 inode
->u
.ext2_i
.i_osync
= 0;
692 inode
->u
.ext2_i
.i_file_acl
= le32_to_cpu(raw_inode
->i_file_acl
);
693 if (S_ISDIR(inode
->i_mode
))
694 inode
->u
.ext2_i
.i_dir_acl
= le32_to_cpu(raw_inode
->i_dir_acl
);
696 inode
->u
.ext2_i
.i_dir_acl
= 0;
697 inode
->u
.ext2_i
.i_high_size
=
698 le32_to_cpu(raw_inode
->i_size_high
);
699 #if BITS_PER_LONG < 64
700 if (raw_inode
->i_size_high
)
701 inode
->i_size
= (__u32
)-1;
703 inode
->i_size
|= ((__u64
)le32_to_cpu(raw_inode
->i_size_high
))
707 inode
->i_generation
= le32_to_cpu(raw_inode
->i_generation
);
708 inode
->u
.ext2_i
.i_block_group
= block_group
;
709 inode
->u
.ext2_i
.i_next_alloc_block
= 0;
710 inode
->u
.ext2_i
.i_next_alloc_goal
= 0;
711 if (inode
->u
.ext2_i
.i_prealloc_count
)
712 ext2_error (inode
->i_sb
, "ext2_read_inode",
713 "New inode has non-zero prealloc count!");
716 * NOTE! The in-memory inode i_blocks array is in little-endian order
717 * even on big-endian machines: we do NOT byteswap the block numbers!
719 for (block
= 0; block
< EXT2_N_BLOCKS
; block
++)
720 inode
->u
.ext2_i
.i_data
[block
] = raw_inode
->i_block
[block
];
722 if (inode
->i_ino
== EXT2_ACL_IDX_INO
||
723 inode
->i_ino
== EXT2_ACL_DATA_INO
)
724 /* Nothing to do */ ;
725 else if (S_ISREG(inode
->i_mode
))
726 inode
->i_op
= &ext2_file_inode_operations
;
727 else if (S_ISDIR(inode
->i_mode
))
728 inode
->i_op
= &ext2_dir_inode_operations
;
729 else if (S_ISLNK(inode
->i_mode
))
730 inode
->i_op
= &ext2_symlink_inode_operations
;
732 init_special_inode(inode
, inode
->i_mode
,
733 le32_to_cpu(raw_inode
->i_block
[0]));
735 inode
->i_attr_flags
= 0;
736 if (inode
->u
.ext2_i
.i_flags
& EXT2_SYNC_FL
) {
737 inode
->i_attr_flags
|= ATTR_FLAG_SYNCRONOUS
;
738 inode
->i_flags
|= MS_SYNCHRONOUS
;
740 if (inode
->u
.ext2_i
.i_flags
& EXT2_APPEND_FL
) {
741 inode
->i_attr_flags
|= ATTR_FLAG_APPEND
;
742 inode
->i_flags
|= S_APPEND
;
744 if (inode
->u
.ext2_i
.i_flags
& EXT2_IMMUTABLE_FL
) {
745 inode
->i_attr_flags
|= ATTR_FLAG_IMMUTABLE
;
746 inode
->i_flags
|= S_IMMUTABLE
;
748 if (inode
->u
.ext2_i
.i_flags
& EXT2_NOATIME_FL
) {
749 inode
->i_attr_flags
|= ATTR_FLAG_NOATIME
;
750 inode
->i_flags
|= MS_NOATIME
;
755 make_bad_inode(inode
);
759 static int ext2_update_inode(struct inode
* inode
, int do_sync
)
761 struct buffer_head
* bh
;
762 struct ext2_inode
* raw_inode
;
763 unsigned long block_group
;
764 unsigned long group_desc
;
767 unsigned long offset
;
769 struct ext2_group_desc
* gdp
;
771 if ((inode
->i_ino
!= EXT2_ROOT_INO
&&
772 inode
->i_ino
< EXT2_FIRST_INO(inode
->i_sb
)) ||
773 inode
->i_ino
> le32_to_cpu(inode
->i_sb
->u
.ext2_sb
.s_es
->s_inodes_count
)) {
774 ext2_error (inode
->i_sb
, "ext2_write_inode",
775 "bad inode number: %lu", inode
->i_ino
);
778 block_group
= (inode
->i_ino
- 1) / EXT2_INODES_PER_GROUP(inode
->i_sb
);
779 if (block_group
>= inode
->i_sb
->u
.ext2_sb
.s_groups_count
) {
780 ext2_error (inode
->i_sb
, "ext2_write_inode",
781 "group >= groups count");
784 group_desc
= block_group
>> EXT2_DESC_PER_BLOCK_BITS(inode
->i_sb
);
785 desc
= block_group
& (EXT2_DESC_PER_BLOCK(inode
->i_sb
) - 1);
786 bh
= inode
->i_sb
->u
.ext2_sb
.s_group_desc
[group_desc
];
788 ext2_error (inode
->i_sb
, "ext2_write_inode",
789 "Descriptor not loaded");
792 gdp
= (struct ext2_group_desc
*) bh
->b_data
;
794 * Figure out the offset within the block group inode table
796 offset
= ((inode
->i_ino
- 1) % EXT2_INODES_PER_GROUP(inode
->i_sb
)) *
797 EXT2_INODE_SIZE(inode
->i_sb
);
798 block
= le32_to_cpu(gdp
[desc
].bg_inode_table
) +
799 (offset
>> EXT2_BLOCK_SIZE_BITS(inode
->i_sb
));
800 if (!(bh
= bread (inode
->i_dev
, block
, inode
->i_sb
->s_blocksize
))) {
801 ext2_error (inode
->i_sb
, "ext2_write_inode",
802 "unable to read inode block - "
803 "inode=%lu, block=%lu", inode
->i_ino
, block
);
806 offset
&= EXT2_BLOCK_SIZE(inode
->i_sb
) - 1;
807 raw_inode
= (struct ext2_inode
*) (bh
->b_data
+ offset
);
809 raw_inode
->i_mode
= cpu_to_le16(inode
->i_mode
);
810 raw_inode
->i_uid
= cpu_to_le16(inode
->i_uid
);
811 raw_inode
->i_gid
= cpu_to_le16(inode
->i_gid
);
812 raw_inode
->i_links_count
= cpu_to_le16(inode
->i_nlink
);
813 raw_inode
->i_size
= cpu_to_le32(inode
->i_size
);
814 raw_inode
->i_atime
= cpu_to_le32(inode
->i_atime
);
815 raw_inode
->i_ctime
= cpu_to_le32(inode
->i_ctime
);
816 raw_inode
->i_mtime
= cpu_to_le32(inode
->i_mtime
);
817 raw_inode
->i_blocks
= cpu_to_le32(inode
->i_blocks
);
818 raw_inode
->i_dtime
= cpu_to_le32(inode
->u
.ext2_i
.i_dtime
);
819 raw_inode
->i_flags
= cpu_to_le32(inode
->u
.ext2_i
.i_flags
);
820 raw_inode
->i_faddr
= cpu_to_le32(inode
->u
.ext2_i
.i_faddr
);
821 raw_inode
->i_frag
= inode
->u
.ext2_i
.i_frag_no
;
822 raw_inode
->i_fsize
= inode
->u
.ext2_i
.i_frag_size
;
823 raw_inode
->i_file_acl
= cpu_to_le32(inode
->u
.ext2_i
.i_file_acl
);
824 if (S_ISDIR(inode
->i_mode
))
825 raw_inode
->i_dir_acl
= cpu_to_le32(inode
->u
.ext2_i
.i_dir_acl
);
827 #if BITS_PER_LONG < 64
828 raw_inode
->i_size_high
=
829 cpu_to_le32(inode
->u
.ext2_i
.i_high_size
);
831 raw_inode
->i_size_high
= cpu_to_le32(inode
->i_size
>> 32);
834 raw_inode
->i_generation
= cpu_to_le32(inode
->i_generation
);
835 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
))
836 raw_inode
->i_block
[0] = cpu_to_le32(kdev_t_to_nr(inode
->i_rdev
));
837 else for (block
= 0; block
< EXT2_N_BLOCKS
; block
++)
838 raw_inode
->i_block
[block
] = inode
->u
.ext2_i
.i_data
[block
];
839 mark_buffer_dirty(bh
, 1);
841 ll_rw_block (WRITE
, 1, &bh
);
843 if (buffer_req(bh
) && !buffer_uptodate(bh
)) {
844 printk ("IO error syncing ext2 inode ["
846 bdevname(inode
->i_dev
), inode
->i_ino
);
/*
 * VFS write_inode hook: asynchronous flush of the inode to disk.
 */
void ext2_write_inode (struct inode * inode)
{
	ext2_update_inode (inode, 0);
}
/*
 * Synchronous flush: write the inode and wait for the I/O to complete.
 */
int ext2_sync_inode (struct inode *inode)
{
	return ext2_update_inode (inode, 1);
}
864 int ext2_notify_change(struct dentry
*dentry
, struct iattr
*iattr
)
866 struct inode
*inode
= dentry
->d_inode
;
871 if ((iattr
->ia_attr_flags
&
872 (ATTR_FLAG_APPEND
| ATTR_FLAG_IMMUTABLE
)) ^
873 (inode
->u
.ext2_i
.i_flags
&
874 (EXT2_APPEND_FL
| EXT2_IMMUTABLE_FL
))) {
875 if (!capable(CAP_LINUX_IMMUTABLE
))
877 } else if ((current
->fsuid
!= inode
->i_uid
) && !capable(CAP_FOWNER
))
880 retval
= inode_change_ok(inode
, iattr
);
884 inode_setattr(inode
, iattr
);
886 flags
= iattr
->ia_attr_flags
;
887 if (flags
& ATTR_FLAG_SYNCRONOUS
) {
888 inode
->i_flags
|= MS_SYNCHRONOUS
;
889 inode
->u
.ext2_i
.i_flags
= EXT2_SYNC_FL
;
891 inode
->i_flags
&= ~MS_SYNCHRONOUS
;
892 inode
->u
.ext2_i
.i_flags
&= ~EXT2_SYNC_FL
;
894 if (flags
& ATTR_FLAG_NOATIME
) {
895 inode
->i_flags
|= MS_NOATIME
;
896 inode
->u
.ext2_i
.i_flags
= EXT2_NOATIME_FL
;
898 inode
->i_flags
&= ~MS_NOATIME
;
899 inode
->u
.ext2_i
.i_flags
&= ~EXT2_NOATIME_FL
;
901 if (flags
& ATTR_FLAG_APPEND
) {
902 inode
->i_flags
|= S_APPEND
;
903 inode
->u
.ext2_i
.i_flags
= EXT2_APPEND_FL
;
905 inode
->i_flags
&= ~S_APPEND
;
906 inode
->u
.ext2_i
.i_flags
&= ~EXT2_APPEND_FL
;
908 if (flags
& ATTR_FLAG_IMMUTABLE
) {
909 inode
->i_flags
|= S_IMMUTABLE
;
910 inode
->u
.ext2_i
.i_flags
= EXT2_IMMUTABLE_FL
;
912 inode
->i_flags
&= ~S_IMMUTABLE
;
913 inode
->u
.ext2_i
.i_flags
&= ~EXT2_IMMUTABLE_FL
;
915 mark_inode_dirty(inode
);