Import 2.3.1pre2
[davej-history.git] / fs / ext2 / inode.c
blob693964a8030e6f86006384e3b837149dd2210071
1 /*
2 * linux/fs/ext2/inode.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
9 * from
11 * linux/fs/minix/inode.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@dcs.ed.ac.uk), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
23 #include <asm/uaccess.h>
24 #include <asm/system.h>
26 #include <linux/errno.h>
27 #include <linux/fs.h>
28 #include <linux/ext2_fs.h>
29 #include <linux/sched.h>
30 #include <linux/stat.h>
31 #include <linux/string.h>
32 #include <linux/locks.h>
33 #include <linux/mm.h>
35 static int ext2_update_inode(struct inode * inode, int do_sync);
/*
 * Called by the VFS at each iput(): return any blocks that were
 * preallocated for this inode but never actually used.
 */
void ext2_put_inode (struct inode * inode)
{
	ext2_discard_prealloc (inode);
}
46 * Called at the last iput() if i_nlink is zero.
48 void ext2_delete_inode (struct inode * inode)
50 if (inode->i_ino == EXT2_ACL_IDX_INO ||
51 inode->i_ino == EXT2_ACL_DATA_INO)
52 return;
53 inode->u.ext2_i.i_dtime = CURRENT_TIME;
54 mark_inode_dirty(inode);
55 ext2_update_inode(inode, IS_SYNC(inode));
56 inode->i_size = 0;
57 if (inode->i_blocks)
58 ext2_truncate (inode);
59 ext2_free_inode (inode);
/* Fetch pointer slot nr straight from the inode's in-core i_data array
   (already in CPU byte order; see ext2_read_inode). */
62 #define inode_bmap(inode, nr) ((inode)->u.ext2_i.i_data[(nr)])
64 static inline int block_bmap (struct buffer_head * bh, int nr)
66 int tmp;
68 if (!bh)
69 return 0;
70 tmp = le32_to_cpu(((u32 *) bh->b_data)[nr]);
71 brelse (bh);
72 return tmp;
/*
 * ext2_discard_prealloc and ext2_alloc_block are atomic wrt. the
 * superblock in the same manner as are ext2_free_blocks and
 * ext2_new_block.  We just wait on the super rather than locking it
 * here, since ext2_new_block will do the necessary locking and we
 * can't block until then.
 */
void ext2_discard_prealloc (struct inode * inode)
{
#ifdef EXT2_PREALLOCATE
	if (inode->u.ext2_i.i_prealloc_count) {
		unsigned short left = inode->u.ext2_i.i_prealloc_count;

		/* Zero the count before freeing so a re-entry cannot
		   release the same window twice. */
		inode->u.ext2_i.i_prealloc_count = 0;
		ext2_free_blocks (inode, inode->u.ext2_i.i_prealloc_block, left);
	}
#endif
}
/*
 * Allocate one block for 'inode'.  If 'goal' lands on the inode's
 * preallocation window the next window block is taken and zeroed;
 * otherwise the stale window is discarded and ext2_new_block() is asked
 * for a fresh block (refilling the window for regular files).
 * Returns the physical block number, or 0 on failure with *err set.
 */
95 static int ext2_alloc_block (struct inode * inode, unsigned long goal, int * err)
97 #ifdef EXT2FS_DEBUG
98 static unsigned long alloc_hits = 0, alloc_attempts = 0;
99 #endif
100 unsigned long result;
101 struct buffer_head * bh;
/* Serialize with other allocators; see the comment above
   ext2_discard_prealloc. */
103 wait_on_super (inode->i_sb);
105 #ifdef EXT2_PREALLOCATE
/* Preallocation hit: consume the next block of the window. */
106 if (inode->u.ext2_i.i_prealloc_count &&
107 (goal == inode->u.ext2_i.i_prealloc_block ||
108 goal + 1 == inode->u.ext2_i.i_prealloc_block))
110 result = inode->u.ext2_i.i_prealloc_block++;
111 inode->u.ext2_i.i_prealloc_count--;
112 ext2_debug ("preallocation hit (%lu/%lu).\n",
113 ++alloc_hits, ++alloc_attempts);
115 /* It doesn't matter if we block in getblk() since
116 we have already atomically allocated the block, and
117 are only clearing it now. */
118 if (!(bh = getblk (inode->i_sb->s_dev, result,
119 inode->i_sb->s_blocksize))) {
120 ext2_error (inode->i_sb, "ext2_alloc_block",
121 "cannot get block %lu", result);
122 return 0;
/* Zero-fill the preallocated block before handing it out. */
124 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
125 mark_buffer_uptodate(bh, 1);
126 mark_buffer_dirty(bh, 1);
127 brelse (bh);
128 } else {
/* Miss: the window (if any) is useless for this goal — drop it and
   allocate afresh. */
129 ext2_discard_prealloc (inode);
130 ext2_debug ("preallocation miss (%lu/%lu).\n",
131 alloc_hits, ++alloc_attempts);
132 if (S_ISREG(inode->i_mode))
133 result = ext2_new_block (inode, goal,
134 &inode->u.ext2_i.i_prealloc_count,
135 &inode->u.ext2_i.i_prealloc_block, err);
136 else
137 result = ext2_new_block (inode, goal, 0, 0, err);
139 #else
140 result = ext2_new_block (inode, goal, 0, 0, err);
141 #endif
143 return result;
/*
 * Translate logical file block number 'block' into its physical block
 * number by walking the direct, single-, double- and triple-indirect
 * trees.  Returns 0 for holes, out-of-range requests, or read failures.
 */
147 int ext2_bmap (struct inode * inode, int block)
149 int i;
150 int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
151 int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
153 if (block < 0) {
154 ext2_warning (inode->i_sb, "ext2_bmap", "block < 0");
155 return 0;
/* Reject anything past what direct + 1/2/3-level indirection can map. */
157 if (block >= EXT2_NDIR_BLOCKS + addr_per_block +
158 (1 << (addr_per_block_bits * 2)) +
159 ((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
160 ext2_warning (inode->i_sb, "ext2_bmap", "block > big");
161 return 0;
/* Direct blocks live right in the inode. */
163 if (block < EXT2_NDIR_BLOCKS)
164 return inode_bmap (inode, block);
165 block -= EXT2_NDIR_BLOCKS;
/* Single indirect: one lookup through the EXT2_IND_BLOCK tree. */
166 if (block < addr_per_block) {
167 i = inode_bmap (inode, EXT2_IND_BLOCK);
168 if (!i)
169 return 0;
170 return block_bmap (bread (inode->i_dev, i,
171 inode->i_sb->s_blocksize), block);
173 block -= addr_per_block;
/* Double indirect: two chained lookups. */
174 if (block < (1 << (addr_per_block_bits * 2))) {
175 i = inode_bmap (inode, EXT2_DIND_BLOCK);
176 if (!i)
177 return 0;
178 i = block_bmap (bread (inode->i_dev, i,
179 inode->i_sb->s_blocksize),
180 block >> addr_per_block_bits);
181 if (!i)
182 return 0;
183 return block_bmap (bread (inode->i_dev, i,
184 inode->i_sb->s_blocksize),
185 block & (addr_per_block - 1));
/* Triple indirect: three chained lookups. */
187 block -= (1 << (addr_per_block_bits * 2));
188 i = inode_bmap (inode, EXT2_TIND_BLOCK);
189 if (!i)
190 return 0;
191 i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
192 block >> (addr_per_block_bits * 2));
193 if (!i)
194 return 0;
195 i = block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
196 (block >> addr_per_block_bits) & (addr_per_block - 1));
197 if (!i)
198 return 0;
199 return block_bmap (bread (inode->i_dev, i, inode->i_sb->s_blocksize),
200 block & (addr_per_block - 1));
203 static struct buffer_head * inode_getblk (struct inode * inode, int nr,
204 int create, int new_block, int * err)
206 u32 * p;
207 int tmp, goal = 0;
208 struct buffer_head * result;
209 int blocks = inode->i_sb->s_blocksize / 512;
211 p = inode->u.ext2_i.i_data + nr;
212 repeat:
213 tmp = *p;
214 if (tmp) {
215 struct buffer_head * result = getblk (inode->i_dev, tmp, inode->i_sb->s_blocksize);
216 if (tmp == *p)
217 return result;
218 brelse (result);
219 goto repeat;
221 *err = -EFBIG;
222 if (!create)
223 goto dont_create;
225 /* Check file limits.. */
227 unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
228 if (limit < RLIM_INFINITY) {
229 limit >>= EXT2_BLOCK_SIZE_BITS(inode->i_sb);
230 if (new_block >= limit) {
231 send_sig(SIGXFSZ, current, 0);
232 dont_create:
233 *err = -EFBIG;
234 return NULL;
239 if (inode->u.ext2_i.i_next_alloc_block == new_block)
240 goal = inode->u.ext2_i.i_next_alloc_goal;
242 ext2_debug ("hint = %d,", goal);
244 if (!goal) {
245 for (tmp = nr - 1; tmp >= 0; tmp--) {
246 if (inode->u.ext2_i.i_data[tmp]) {
247 goal = inode->u.ext2_i.i_data[tmp];
248 break;
251 if (!goal)
252 goal = (inode->u.ext2_i.i_block_group *
253 EXT2_BLOCKS_PER_GROUP(inode->i_sb)) +
254 le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_first_data_block);
257 ext2_debug ("goal = %d.\n", goal);
259 tmp = ext2_alloc_block (inode, goal, err);
260 if (!tmp)
261 return NULL;
262 result = getblk (inode->i_dev, tmp, inode->i_sb->s_blocksize);
263 if (*p) {
264 ext2_free_blocks (inode, tmp, 1);
265 brelse (result);
266 goto repeat;
268 *p = tmp;
269 inode->u.ext2_i.i_next_alloc_block = new_block;
270 inode->u.ext2_i.i_next_alloc_goal = tmp;
271 inode->i_ctime = CURRENT_TIME;
272 inode->i_blocks += blocks;
273 if (IS_SYNC(inode) || inode->u.ext2_i.i_osync)
274 ext2_sync_inode (inode);
275 else
276 mark_inode_dirty(inode);
277 return result;
280 static struct buffer_head * block_getblk (struct inode * inode,
281 struct buffer_head * bh, int nr,
282 int create, int blocksize,
283 int new_block, int * err)
285 int tmp, goal = 0;
286 u32 * p;
287 struct buffer_head * result;
288 int blocks = inode->i_sb->s_blocksize / 512;
289 unsigned long limit;
291 if (!bh)
292 return NULL;
293 if (!buffer_uptodate(bh)) {
294 ll_rw_block (READ, 1, &bh);
295 wait_on_buffer (bh);
296 if (!buffer_uptodate(bh)) {
297 brelse (bh);
298 return NULL;
301 p = (u32 *) bh->b_data + nr;
302 repeat:
303 tmp = le32_to_cpu(*p);
304 if (tmp) {
305 result = getblk (bh->b_dev, tmp, blocksize);
306 if (tmp == le32_to_cpu(*p)) {
307 brelse (bh);
308 return result;
310 brelse (result);
311 goto repeat;
313 *err = -EFBIG;
314 if (!create) {
315 brelse (bh);
316 return NULL;
319 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
320 if (limit < RLIM_INFINITY) {
321 limit >>= EXT2_BLOCK_SIZE_BITS(inode->i_sb);
322 if (new_block >= limit) {
323 brelse (bh);
324 send_sig(SIGXFSZ, current, 0);
325 return NULL;
329 if (inode->u.ext2_i.i_next_alloc_block == new_block)
330 goal = inode->u.ext2_i.i_next_alloc_goal;
331 if (!goal) {
332 for (tmp = nr - 1; tmp >= 0; tmp--) {
333 if (le32_to_cpu(((u32 *) bh->b_data)[tmp])) {
334 goal = le32_to_cpu(((u32 *)bh->b_data)[tmp]);
335 break;
338 if (!goal)
339 goal = bh->b_blocknr;
341 tmp = ext2_alloc_block (inode, goal, err);
342 if (!tmp) {
343 brelse (bh);
344 return NULL;
346 result = getblk (bh->b_dev, tmp, blocksize);
347 if (le32_to_cpu(*p)) {
348 ext2_free_blocks (inode, tmp, 1);
349 brelse (result);
350 goto repeat;
352 *p = le32_to_cpu(tmp);
353 mark_buffer_dirty(bh, 1);
354 if (IS_SYNC(inode) || inode->u.ext2_i.i_osync) {
355 ll_rw_block (WRITE, 1, &bh);
356 wait_on_buffer (bh);
358 inode->i_ctime = CURRENT_TIME;
359 inode->i_blocks += blocks;
360 mark_inode_dirty(inode);
361 inode->u.ext2_i.i_next_alloc_block = new_block;
362 inode->u.ext2_i.i_next_alloc_goal = tmp;
363 brelse (bh);
364 return result;
367 struct buffer_head * ext2_getblk (struct inode * inode, long block,
368 int create, int * err)
370 struct buffer_head * bh;
371 unsigned long b;
372 unsigned long addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
373 int addr_per_block_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
375 *err = -EIO;
376 if (block < 0) {
377 ext2_warning (inode->i_sb, "ext2_getblk", "block < 0");
378 return NULL;
380 if (block > EXT2_NDIR_BLOCKS + addr_per_block +
381 (1 << (addr_per_block_bits * 2)) +
382 ((1 << (addr_per_block_bits * 2)) << addr_per_block_bits)) {
383 ext2_warning (inode->i_sb, "ext2_getblk", "block > big");
384 return NULL;
387 * If this is a sequential block allocation, set the next_alloc_block
388 * to this block now so that all the indblock and data block
389 * allocations use the same goal zone
392 ext2_debug ("block %lu, next %lu, goal %lu.\n", block,
393 inode->u.ext2_i.i_next_alloc_block,
394 inode->u.ext2_i.i_next_alloc_goal);
396 if (block == inode->u.ext2_i.i_next_alloc_block + 1) {
397 inode->u.ext2_i.i_next_alloc_block++;
398 inode->u.ext2_i.i_next_alloc_goal++;
401 *err = -ENOSPC;
402 b = block;
403 if (block < EXT2_NDIR_BLOCKS)
404 return inode_getblk (inode, block, create, b, err);
405 block -= EXT2_NDIR_BLOCKS;
406 if (block < addr_per_block) {
407 bh = inode_getblk (inode, EXT2_IND_BLOCK, create, b, err);
408 return block_getblk (inode, bh, block, create,
409 inode->i_sb->s_blocksize, b, err);
411 block -= addr_per_block;
412 if (block < (1 << (addr_per_block_bits * 2))) {
413 bh = inode_getblk (inode, EXT2_DIND_BLOCK, create, b, err);
414 bh = block_getblk (inode, bh, block >> addr_per_block_bits,
415 create, inode->i_sb->s_blocksize, b, err);
416 return block_getblk (inode, bh, block & (addr_per_block - 1),
417 create, inode->i_sb->s_blocksize, b, err);
419 block -= (1 << (addr_per_block_bits * 2));
420 bh = inode_getblk (inode, EXT2_TIND_BLOCK, create, b, err);
421 bh = block_getblk (inode, bh, block >> (addr_per_block_bits * 2),
422 create, inode->i_sb->s_blocksize, b, err);
423 bh = block_getblk (inode, bh, (block >> addr_per_block_bits) & (addr_per_block - 1),
424 create, inode->i_sb->s_blocksize, b, err);
425 return block_getblk (inode, bh, block & (addr_per_block - 1), create,
426 inode->i_sb->s_blocksize, b, err);
429 struct buffer_head * ext2_bread (struct inode * inode, int block,
430 int create, int *err)
432 struct buffer_head * bh;
433 int prev_blocks;
435 prev_blocks = inode->i_blocks;
437 bh = ext2_getblk (inode, block, create, err);
438 if (!bh)
439 return bh;
442 * If the inode has grown, and this is a directory, then perform
443 * preallocation of a few more blocks to try to keep directory
444 * fragmentation down.
446 if (create &&
447 S_ISDIR(inode->i_mode) &&
448 inode->i_blocks > prev_blocks &&
449 EXT2_HAS_COMPAT_FEATURE(inode->i_sb,
450 EXT2_FEATURE_COMPAT_DIR_PREALLOC)) {
451 int i;
452 struct buffer_head *tmp_bh;
454 for (i = 1;
455 i < EXT2_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
456 i++) {
458 * ext2_getblk will zero out the contents of the
459 * directory for us
461 tmp_bh = ext2_getblk(inode, block+i, create, err);
462 if (!tmp_bh) {
463 brelse (bh);
464 return 0;
466 brelse (tmp_bh);
470 if (buffer_uptodate(bh))
471 return bh;
472 ll_rw_block (READ, 1, &bh);
473 wait_on_buffer (bh);
474 if (buffer_uptodate(bh))
475 return bh;
476 brelse (bh);
477 *err = -EIO;
478 return NULL;
/*
 * Fill the in-core inode from its on-disk ext2_inode: locate the inode
 * within its block group's inode table, read that block, and unpack
 * every field (little-endian on disk -> CPU order in core).  On any
 * failure the inode is marked bad via make_bad_inode().
 */
481 void ext2_read_inode (struct inode * inode)
483 struct buffer_head * bh;
484 struct ext2_inode * raw_inode;
485 unsigned long block_group;
486 unsigned long group_desc;
487 unsigned long desc;
488 unsigned long block;
489 unsigned long offset;
490 struct ext2_group_desc * gdp;
/* Sanity-check the inode number: reserved inodes (root, ACL) aside, it
   must lie between the first usable inode and s_inodes_count. */
492 if ((inode->i_ino != EXT2_ROOT_INO && inode->i_ino != EXT2_ACL_IDX_INO &&
493 inode->i_ino != EXT2_ACL_DATA_INO &&
494 inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
495 inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
496 ext2_error (inode->i_sb, "ext2_read_inode",
497 "bad inode number: %lu", inode->i_ino);
498 goto bad_inode;
/* Inode numbers are 1-based; find the owning block group and its
   cached group descriptor. */
500 block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
501 if (block_group >= inode->i_sb->u.ext2_sb.s_groups_count) {
502 ext2_error (inode->i_sb, "ext2_read_inode",
503 "group >= groups count");
504 goto bad_inode;
506 group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
507 desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
508 bh = inode->i_sb->u.ext2_sb.s_group_desc[group_desc];
509 if (!bh) {
510 ext2_error (inode->i_sb, "ext2_read_inode",
511 "Descriptor not loaded");
512 goto bad_inode;
515 gdp = (struct ext2_group_desc *) bh->b_data;
517 * Figure out the offset within the block group inode table
519 offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
520 EXT2_INODE_SIZE(inode->i_sb);
521 block = le32_to_cpu(gdp[desc].bg_inode_table) +
522 (offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
523 if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
524 ext2_error (inode->i_sb, "ext2_read_inode",
525 "unable to read inode block - "
526 "inode=%lu, block=%lu", inode->i_ino, block);
527 goto bad_inode;
529 offset &= (EXT2_BLOCK_SIZE(inode->i_sb) - 1);
530 raw_inode = (struct ext2_inode *) (bh->b_data + offset);
/* Unpack the on-disk fields, converting little-endian to CPU order. */
532 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
533 inode->i_uid = le16_to_cpu(raw_inode->i_uid);
534 inode->i_gid = le16_to_cpu(raw_inode->i_gid);
535 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
536 inode->i_size = le32_to_cpu(raw_inode->i_size);
537 inode->i_atime = le32_to_cpu(raw_inode->i_atime);
538 inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
539 inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
540 inode->u.ext2_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
541 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
542 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
543 inode->i_version = ++event;
544 inode->u.ext2_i.i_new_inode = 0;
545 inode->u.ext2_i.i_flags = le32_to_cpu(raw_inode->i_flags);
546 inode->u.ext2_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
547 inode->u.ext2_i.i_frag_no = raw_inode->i_frag;
548 inode->u.ext2_i.i_frag_size = raw_inode->i_fsize;
549 inode->u.ext2_i.i_osync = 0;
550 inode->u.ext2_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
/* i_size_high doubles as i_dir_acl for directories; for everything
   else it is the high 32 bits of the file size. */
551 if (S_ISDIR(inode->i_mode))
552 inode->u.ext2_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
553 else {
554 inode->u.ext2_i.i_dir_acl = 0;
555 inode->u.ext2_i.i_high_size =
556 le32_to_cpu(raw_inode->i_size_high);
557 #if BITS_PER_LONG < 64
/* 32-bit platform: clamp sizes that don't fit in a long. */
558 if (raw_inode->i_size_high)
559 inode->i_size = (__u32)-1;
560 #else
561 inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high))
562 << 32;
563 #endif
565 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
566 inode->u.ext2_i.i_block_group = block_group;
567 inode->u.ext2_i.i_next_alloc_block = 0;
568 inode->u.ext2_i.i_next_alloc_goal = 0;
569 if (inode->u.ext2_i.i_prealloc_count)
570 ext2_error (inode->i_sb, "ext2_read_inode",
571 "New inode has non-zero prealloc count!");
/* Fast symlinks store their target text in i_block itself, so copy it
   raw; otherwise i_block holds block numbers needing byte-swapping. */
572 if (S_ISLNK(inode->i_mode) && !inode->i_blocks)
573 for (block = 0; block < EXT2_N_BLOCKS; block++)
574 inode->u.ext2_i.i_data[block] = raw_inode->i_block[block];
575 else for (block = 0; block < EXT2_N_BLOCKS; block++)
576 inode->u.ext2_i.i_data[block] = le32_to_cpu(raw_inode->i_block[block]);
/* Wire up the inode operations by file type. */
577 if (inode->i_ino == EXT2_ACL_IDX_INO ||
578 inode->i_ino == EXT2_ACL_DATA_INO)
579 /* Nothing to do */ ;
580 else if (S_ISREG(inode->i_mode))
581 inode->i_op = &ext2_file_inode_operations;
582 else if (S_ISDIR(inode->i_mode))
583 inode->i_op = &ext2_dir_inode_operations;
584 else if (S_ISLNK(inode->i_mode))
585 inode->i_op = &ext2_symlink_inode_operations;
586 else
587 init_special_inode(inode, inode->i_mode,
588 le32_to_cpu(raw_inode->i_block[0]));
589 brelse (bh);
/* Propagate per-inode ext2 flags to the generic VFS attribute flags. */
590 inode->i_attr_flags = 0;
591 if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL) {
592 inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS;
593 inode->i_flags |= MS_SYNCHRONOUS;
595 if (inode->u.ext2_i.i_flags & EXT2_APPEND_FL) {
596 inode->i_attr_flags |= ATTR_FLAG_APPEND;
597 inode->i_flags |= S_APPEND;
599 if (inode->u.ext2_i.i_flags & EXT2_IMMUTABLE_FL) {
600 inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE;
601 inode->i_flags |= S_IMMUTABLE;
603 if (inode->u.ext2_i.i_flags & EXT2_NOATIME_FL) {
604 inode->i_attr_flags |= ATTR_FLAG_NOATIME;
605 inode->i_flags |= MS_NOATIME;
607 return;
609 bad_inode:
610 make_bad_inode(inode);
611 return;
/*
 * Write the in-core inode back to its slot in the on-disk inode table
 * (the mirror image of ext2_read_inode).  When do_sync is set the
 * buffer is written out synchronously and the result checked.
 * Returns 0 on success or -EIO.
 */
614 static int ext2_update_inode(struct inode * inode, int do_sync)
616 struct buffer_head * bh;
617 struct ext2_inode * raw_inode;
618 unsigned long block_group;
619 unsigned long group_desc;
620 unsigned long desc;
621 unsigned long block;
622 unsigned long offset;
623 int err = 0;
624 struct ext2_group_desc * gdp;
/* Same inode-number sanity check as the read path (minus the ACL
   reserved inodes, which are never written through here). */
626 if ((inode->i_ino != EXT2_ROOT_INO &&
627 inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
628 inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
629 ext2_error (inode->i_sb, "ext2_write_inode",
630 "bad inode number: %lu", inode->i_ino);
631 return -EIO;
633 block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
634 if (block_group >= inode->i_sb->u.ext2_sb.s_groups_count) {
635 ext2_error (inode->i_sb, "ext2_write_inode",
636 "group >= groups count");
637 return -EIO;
639 group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
640 desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
641 bh = inode->i_sb->u.ext2_sb.s_group_desc[group_desc];
642 if (!bh) {
643 ext2_error (inode->i_sb, "ext2_write_inode",
644 "Descriptor not loaded");
645 return -EIO;
647 gdp = (struct ext2_group_desc *) bh->b_data;
649 * Figure out the offset within the block group inode table
651 offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
652 EXT2_INODE_SIZE(inode->i_sb);
653 block = le32_to_cpu(gdp[desc].bg_inode_table) +
654 (offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
/* Read-modify-write: the block holds several inodes. */
655 if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
656 ext2_error (inode->i_sb, "ext2_write_inode",
657 "unable to read inode block - "
658 "inode=%lu, block=%lu", inode->i_ino, block);
659 return -EIO;
661 offset &= EXT2_BLOCK_SIZE(inode->i_sb) - 1;
662 raw_inode = (struct ext2_inode *) (bh->b_data + offset);
/* Pack the fields back in on-disk (little-endian) order. */
664 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
665 raw_inode->i_uid = cpu_to_le16(inode->i_uid);
666 raw_inode->i_gid = cpu_to_le16(inode->i_gid);
667 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
668 raw_inode->i_size = cpu_to_le32(inode->i_size);
669 raw_inode->i_atime = cpu_to_le32(inode->i_atime);
670 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
671 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
672 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
673 raw_inode->i_dtime = cpu_to_le32(inode->u.ext2_i.i_dtime);
674 raw_inode->i_flags = cpu_to_le32(inode->u.ext2_i.i_flags);
675 raw_inode->i_faddr = cpu_to_le32(inode->u.ext2_i.i_faddr);
676 raw_inode->i_frag = inode->u.ext2_i.i_frag_no;
677 raw_inode->i_fsize = inode->u.ext2_i.i_frag_size;
678 raw_inode->i_file_acl = cpu_to_le32(inode->u.ext2_i.i_file_acl);
/* i_size_high doubles as i_dir_acl for directories. */
679 if (S_ISDIR(inode->i_mode))
680 raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext2_i.i_dir_acl);
681 else {
682 #if BITS_PER_LONG < 64
683 raw_inode->i_size_high =
684 cpu_to_le32(inode->u.ext2_i.i_high_size);
685 #else
686 raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
687 #endif
689 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
/* Device nodes keep their dev number in i_block[0]; fast symlinks keep
   raw target text; everything else stores byte-swapped block numbers. */
690 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
691 raw_inode->i_block[0] = cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
692 else if (S_ISLNK(inode->i_mode) && !inode->i_blocks)
693 for (block = 0; block < EXT2_N_BLOCKS; block++)
694 raw_inode->i_block[block] = inode->u.ext2_i.i_data[block];
695 else for (block = 0; block < EXT2_N_BLOCKS; block++)
696 raw_inode->i_block[block] = cpu_to_le32(inode->u.ext2_i.i_data[block]);
697 mark_buffer_dirty(bh, 1);
698 if (do_sync) {
699 ll_rw_block (WRITE, 1, &bh);
700 wait_on_buffer (bh);
701 if (buffer_req(bh) && !buffer_uptodate(bh)) {
702 printk ("IO error syncing ext2 inode ["
703 "%s:%08lx]\n",
704 bdevname(inode->i_dev), inode->i_ino);
705 err = -EIO;
708 brelse (bh);
709 return err;
/*
 * VFS write_inode() hook: push the in-core inode into the buffer cache
 * asynchronously (no wait for the buffer to reach the disk).
 */
void ext2_write_inode (struct inode * inode)
{
	ext2_update_inode (inode, 0);
}
/*
 * Synchronous counterpart of ext2_write_inode(): write the inode and
 * wait for the buffer, returning 0 on success or -EIO on write failure.
 */
int ext2_sync_inode (struct inode *inode)
{
	return ext2_update_inode (inode, 1);
}
722 int ext2_notify_change(struct dentry *dentry, struct iattr *iattr)
724 struct inode *inode = dentry->d_inode;
725 int retval;
726 unsigned int flags;
728 retval = -EPERM;
729 if ((iattr->ia_attr_flags &
730 (ATTR_FLAG_APPEND | ATTR_FLAG_IMMUTABLE)) ^
731 (inode->u.ext2_i.i_flags &
732 (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL))) {
733 if (!capable(CAP_LINUX_IMMUTABLE))
734 goto out;
735 } else if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
736 goto out;
738 retval = inode_change_ok(inode, iattr);
739 if (retval != 0)
740 goto out;
742 inode_setattr(inode, iattr);
744 flags = iattr->ia_attr_flags;
745 if (flags & ATTR_FLAG_SYNCRONOUS) {
746 inode->i_flags |= MS_SYNCHRONOUS;
747 inode->u.ext2_i.i_flags = EXT2_SYNC_FL;
748 } else {
749 inode->i_flags &= ~MS_SYNCHRONOUS;
750 inode->u.ext2_i.i_flags &= ~EXT2_SYNC_FL;
752 if (flags & ATTR_FLAG_NOATIME) {
753 inode->i_flags |= MS_NOATIME;
754 inode->u.ext2_i.i_flags = EXT2_NOATIME_FL;
755 } else {
756 inode->i_flags &= ~MS_NOATIME;
757 inode->u.ext2_i.i_flags &= ~EXT2_NOATIME_FL;
759 if (flags & ATTR_FLAG_APPEND) {
760 inode->i_flags |= S_APPEND;
761 inode->u.ext2_i.i_flags = EXT2_APPEND_FL;
762 } else {
763 inode->i_flags &= ~S_APPEND;
764 inode->u.ext2_i.i_flags &= ~EXT2_APPEND_FL;
766 if (flags & ATTR_FLAG_IMMUTABLE) {
767 inode->i_flags |= S_IMMUTABLE;
768 inode->u.ext2_i.i_flags = EXT2_IMMUTABLE_FL;
769 } else {
770 inode->i_flags &= ~S_IMMUTABLE;
771 inode->u.ext2_i.i_flags &= ~EXT2_IMMUTABLE_FL;
773 mark_inode_dirty(inode);
774 out:
775 return retval;