5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 * linux/fs/ext2/inode.c
12 * Copyright (C) 1992, 1993, 1994, 1995
13 * Remy Card (card@masi.ibp.fr)
14 * Laboratoire MASI - Institut Blaise Pascal
15 * Universite Pierre et Marie Curie (Paris VI)
19 * linux/fs/minix/inode.c
21 * Copyright (C) 1991, 1992 Linus Torvalds
23 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
24 * Big-endian to little-endian byte-swapping/bitmaps by
25 * David S. Miller (davem@caip.rutgers.edu), 1995
28 #include <asm/uaccess.h>
29 #include <asm/system.h>
31 #include <linux/errno.h>
33 #include <linux/ufs_fs.h>
34 #include <linux/sched.h>
35 #include <linux/stat.h>
36 #include <linux/string.h>
37 #include <linux/locks.h>
/*
 * Compile-time debug switches for this file: UFS_INODE_DEBUG enables the
 * UFSD() trace macro, UFS_INODE_DEBUG_MORE enables full inode dumps via
 * ufs_print_inode().  Both are explicitly disabled here.
 */
43 #undef UFS_INODE_DEBUG
44 #undef UFS_INODE_DEBUG_MORE
46 #ifdef UFS_INODE_DEBUG
/* UFSD(x): print a "(file, line), function: " prefix, then the message x. */
47 #define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
52 #ifdef UFS_INODE_DEBUG_MORE
/*
 * Debug-only helper: dump the interesting fields of an in-core UFS inode -
 * ino/mode/nlink/uid/gid/size/blocks, the 12 direct block pointers and the
 * three indirect block pointers (i_data[UFS_IND/DIND/TIND_BLOCK]), each
 * byte-swapped per the superblock's s_swab setting.
 * NOTE(review): this extract is missing lines (opening brace, #endif) -
 * the code bytes below are kept exactly as found.
 */
53 static void ufs_print_inode(struct inode
* inode
)
55 unsigned swab
= inode
->i_sb
->u
.ufs_sb
.s_swab
;
56 printk("ino %lu mode 0%6.6o nlink %d uid %d uid32 %u"
57 " gid %d gid32 %u size %lu blocks %lu\n",
58 inode
->i_ino
, inode
->i_mode
, inode
->i_nlink
,
59 inode
->i_uid
, inode
->u
.ufs_i
.i_uid
, inode
->i_gid
,
60 inode
->u
.ufs_i
.i_gid
, inode
->i_size
, inode
->i_blocks
);
/* Direct block pointers i_data[0..11]. */
61 printk(" db <%u %u %u %u %u %u %u %u %u %u %u %u>\n",
62 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[0]),
63 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[1]),
64 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[2]),
65 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[3]),
66 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[4]),
67 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[5]),
68 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[6]),
69 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[7]),
70 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[8]),
71 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[9]),
72 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[10]),
73 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[11]));
/* Generation number and the single/double/triple indirect pointers. */
74 printk(" gen %u ib <%u %u %u>\n",
76 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[UFS_IND_BLOCK
]),
77 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[UFS_DIND_BLOCK
]),
78 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[UFS_TIND_BLOCK
]));
/*
 * Map fragment number (nr) through the inode's own i_data[] pointer array:
 * index by (nr >> s_fpbshift) to pick the block pointer, then add the
 * fragment offset within that block (nr & s_fpbmask).  Relies on local
 * variables 'uspi' and 'swab' being in scope at every call site.
 */
82 #define ufs_inode_bmap(inode, nr) \
83 (SWAB32((inode)->u.ufs_i.i_u1.i_data[(nr) >> uspi->s_fpbshift]) + ((nr) & uspi->s_fpbmask))
/*
 * Like ufs_inode_bmap(), but indexes an indirect block's pointer array
 * held in buffer 'bh' instead of the inode's direct array: pick the u32
 * pointer at (nr >> s_fpbshift) and add the in-block fragment offset.
 * NOTE(review): the extract is missing lines here (opening brace, null
 * check of bh, brelse and the final return of tmp) - verify against the
 * full file; code bytes kept exactly as found.
 */
85 static inline unsigned ufs_block_bmap (struct buffer_head
* bh
, unsigned nr
,
86 struct ufs_sb_private_info
* uspi
, unsigned swab
)
90 UFSD(("ENTER, nr %u\n", nr
))
93 tmp
= SWAB32(((u32
*) bh
->b_data
)[nr
>> uspi
->s_fpbshift
]) + (nr
& uspi
->s_fpbmask
);
95 UFSD(("EXIT, result %u\n", tmp
))
/*
 * Translate a file-relative fragment number into an on-disk fragment
 * address, walking direct pointers, then single-, double- and
 * triple-indirect blocks as the fragment number grows.  Each indirect
 * level is read with bread() and resolved via ufs_block_bmap(); every
 * result is offset by uspi->s_sbbase.
 * NOTE(review): several original lines (declarations of tmp/swab, early
 * returns, brelse of bread() buffers) are missing from this extract -
 * code bytes kept exactly as found.
 */
99 int ufs_bmap (struct inode
* inode
, int fragment
)
101 struct super_block
* sb
;
102 struct ufs_sb_private_info
* uspi
;
107 uspi
= sb
->u
.ufs_sb
.s_uspi
;
108 swab
= sb
->u
.ufs_sb
.s_swab
;
110 UFSD(("ENTER, ino %lu, fragment %u\n", inode
->i_ino
, fragment
))
/* Reject fragments beyond what direct + 1/2/3-level indirection can map. */
112 if (fragment
>= ((UFS_NDADDR
+ uspi
->s_apb
+ uspi
->s_2apb
+ uspi
->s_3apb
) << uspi
->s_fpbshift
)) {
113 ufs_warning (sb
, "ufs_bmap", "block > big");
/* Direct fragments: resolved straight from the inode's i_data[]. */
120 if (fragment
< UFS_NDIR_FRAGMENT
)
121 return (uspi
->s_sbbase
+ ufs_inode_bmap (inode
, fragment
));
/* Single indirect. */
126 fragment
-= UFS_NDIR_FRAGMENT
;
127 if (fragment
< (1 << (uspi
->s_apbshift
+ uspi
->s_fpbshift
))) {
128 tmp
= ufs_inode_bmap (inode
,
129 UFS_IND_FRAGMENT
+ (fragment
>> uspi
->s_apbshift
));
132 return (uspi
->s_sbbase
+
133 ufs_block_bmap (bread (sb
->s_dev
, uspi
->s_sbbase
+ tmp
, sb
->s_blocksize
),
134 fragment
& uspi
->s_apbmask
, uspi
, swab
));
/* Double indirect: two ufs_block_bmap() hops. */
140 fragment
-= 1 << (uspi
->s_apbshift
+ uspi
->s_fpbshift
);
141 if (fragment
< (1 << (uspi
->s_2apbshift
+ uspi
->s_fpbshift
))) {
142 tmp
= ufs_inode_bmap (inode
,
143 UFS_DIND_FRAGMENT
+ (fragment
>> uspi
->s_2apbshift
));
146 tmp
= ufs_block_bmap (bread (sb
->s_dev
, uspi
->s_sbbase
+ tmp
, sb
->s_blocksize
),
147 (fragment
>> uspi
->s_apbshift
) & uspi
->s_apbmask
, uspi
, swab
);
150 return (uspi
->s_sbbase
+
151 ufs_block_bmap (bread (sb
->s_dev
, uspi
->s_sbbase
+ tmp
, sb
->s_blocksize
),
152 fragment
& uspi
->s_apbmask
, uspi
, swab
));
/* Triple indirect: three ufs_block_bmap() hops. */
158 fragment
-= 1 << (uspi
->s_2apbshift
+ uspi
->s_fpbshift
);
159 tmp
= ufs_inode_bmap (inode
,
160 UFS_TIND_FRAGMENT
+ (fragment
>> uspi
->s_3apbshift
));
163 tmp
= ufs_block_bmap (bread (sb
->s_dev
, uspi
->s_sbbase
+ tmp
, sb
->s_blocksize
),
164 (fragment
>> uspi
->s_2apbshift
) & uspi
->s_apbmask
, uspi
, swab
);
167 tmp
= ufs_block_bmap (bread (sb
->s_dev
, uspi
->s_sbbase
+ tmp
, sb
->s_blocksize
),
168 (fragment
>> uspi
->s_apbshift
) & uspi
->s_apbmask
, uspi
, swab
);
171 return (uspi
->s_sbbase
+
172 ufs_block_bmap (bread (sb
->s_dev
, uspi
->s_sbbase
+ tmp
, sb
->s_blocksize
),
173 fragment
& uspi
->s_apbmask
, uspi
, swab
));
/*
 * Get (and, when 'create' is set, allocate) the buffer for a fragment
 * addressed directly from the inode's i_data[].  Handles three growth
 * cases relative to the last allocated block: extending into a brand-new
 * block past the end (which may first require reallocating the partial
 * last block to a full one), extending the last allocated block itself,
 * and filling a hole before the last allocated block.  On success the
 * inode is marked dirty (and synced if required) and the fragment's
 * buffer from getblk() is the result.
 * NOTE(review): this extract is missing many original lines (declarations
 * of p/p2/tmp/goal, braces, early returns, failure paths) - code bytes
 * are kept exactly as found; verify control flow against the full file.
 */
176 static struct buffer_head
* ufs_inode_getfrag (struct inode
* inode
,
177 unsigned fragment
, unsigned new_fragment
, int create
,
178 unsigned required
, int * err
)
180 struct super_block
* sb
;
181 struct ufs_sb_private_info
* uspi
;
182 struct buffer_head
* result
;
184 unsigned block
, blockoff
, lastfrag
, lastblock
, lastblockoff
;
189 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n",
190 inode
->i_ino
, fragment
, new_fragment
, required
))
193 swab
= sb
->u
.ufs_sb
.s_swab
;
194 uspi
= sb
->u
.ufs_sb
.s_uspi
;
195 block
= ufs_fragstoblks (fragment
);
196 blockoff
= ufs_fragnum (fragment
);
/* p points at the direct pointer slot for this fragment's block. */
197 p
= inode
->u
.ufs_i
.i_u1
.i_data
+ block
;
/* Fast path: fragment already mapped and inside the allocated region. */
202 lastfrag
= inode
->u
.ufs_i
.i_lastfrag
;
203 if (tmp
&& fragment
< lastfrag
) {
204 result
= getblk (sb
->s_dev
, uspi
->s_sbbase
+ tmp
+ blockoff
, sb
->s_blocksize
);
/* Re-check *p: the pointer may have changed while getblk slept. */
205 if (tmp
== SWAB32(*p
)) {
206 UFSD(("EXIT, result %u\n", tmp
+ blockoff
))
/* Enforce RLIMIT_FSIZE before growing the file. */
215 limit
= current
->rlim
[RLIMIT_FSIZE
].rlim_cur
;
216 if (limit
< RLIM_INFINITY
) {
217 limit
>>= sb
->s_blocksize_bits
;
218 if (new_fragment
>= limit
) {
219 send_sig(SIGXFSZ
, current
, 0);
223 lastblock
= ufs_fragstoblks (lastfrag
);
224 lastblockoff
= ufs_fragnum (lastfrag
);
226 * We will extend file into new block beyond last allocated block
228 if (lastblock
< block
) {
230 * We must reallocate last allocated block
233 p2
= inode
->u
.ufs_i
.i_u1
.i_data
+ lastblock
;
234 tmp
= ufs_new_fragments (inode
, p2
, lastfrag
,
235 SWAB32(*p2
), uspi
->s_fpb
- lastblockoff
, err
);
/* Someone else changed i_lastfrag while we allocated: bail/retry. */
237 if (lastfrag
!= inode
->u
.ufs_i
.i_lastfrag
)
242 lastfrag
= inode
->u
.ufs_i
.i_lastfrag
;
/* Goal: the fragment right after the (now full) last block. */
245 goal
= SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[lastblock
]) + uspi
->s_fpb
;
246 tmp
= ufs_new_fragments (inode
, p
, fragment
- blockoff
,
247 goal
, required
+ blockoff
, err
);
250 * We will extend last allocated block
252 else if (lastblock
== block
) {
253 tmp
= ufs_new_fragments (inode
, p
, fragment
- (blockoff
- lastblockoff
),
254 SWAB32(*p
), required
+ (blockoff
- lastblockoff
), err
);
257 * We will allocate new block before last allocated block
259 else /* (lastblock > block) */ {
/* Prefer placing the new block right after the previous one. */
260 if (lastblock
&& (tmp
= SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[lastblock
-1])))
261 goal
= tmp
+ uspi
->s_fpb
;
262 tmp
= ufs_new_fragments (inode
, p
, fragment
- blockoff
,
263 goal
, uspi
->s_fpb
, err
);
/* Detect a concurrent change of the mapping / i_lastfrag. */
266 if ((!blockoff
&& SWAB32(*p
)) ||
267 (blockoff
&& lastfrag
!= inode
->u
.ufs_i
.i_lastfrag
))
272 result
= getblk (inode
->i_dev
, tmp
+ blockoff
, sb
->s_blocksize
);
273 inode
->i_ctime
= CURRENT_TIME
;
275 ufs_sync_inode (inode
);
276 mark_inode_dirty(inode
);
277 UFSD(("EXIT, result %u\n", tmp
+ blockoff
))
/*
 * Get (and optionally allocate) a fragment addressed through an indirect
 * block: 'bh' holds the indirect pointer array, 'fragment' selects the
 * slot within it.  Reads the indirect block if not up to date, returns
 * the existing buffer when the slot is mapped, otherwise allocates a new
 * block near a goal derived from neighbouring pointers, dirties bh and
 * the inode.
 * NOTE(review): many original lines (declarations, braces, returns,
 * brelse/wait_on_buffer calls) are missing from this extract - code bytes
 * kept exactly as found.
 */
281 static struct buffer_head
* ufs_block_getfrag (struct inode
* inode
,
282 struct buffer_head
* bh
, unsigned fragment
, unsigned new_fragment
,
283 int create
, unsigned blocksize
, int * err
)
285 struct super_block
* sb
;
286 struct ufs_sb_private_info
* uspi
;
287 struct buffer_head
* result
;
288 unsigned tmp
, goal
, block
, blockoff
;
293 swab
= sb
->u
.ufs_sb
.s_swab
;
294 uspi
= sb
->u
.ufs_sb
.s_uspi
;
295 block
= ufs_fragstoblks (fragment
);
296 blockoff
= ufs_fragnum (fragment
);
298 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode
->i_ino
, fragment
, new_fragment
))
/* Make sure the indirect block itself has been read from disk. */
302 if (!buffer_uptodate(bh
)) {
303 ll_rw_block (READ
, 1, &bh
);
305 if (!buffer_uptodate(bh
)) {
/* p points at this fragment's slot inside the indirect block. */
311 p
= (u32
*) bh
->b_data
+ block
;
315 result
= getblk (bh
->b_dev
, uspi
->s_sbbase
+ tmp
+ blockoff
, sb
->s_blocksize
);
/* Re-check the slot after getblk may have slept. */
316 if (tmp
== SWAB32(*p
)) {
318 UFSD(("EXIT, result %u\n", tmp
+ blockoff
))
/*
 * NOTE(review): this RLIMIT_FSIZE check shifts by sb->s_blocksize,
 * whereas ufs_inode_getfrag shifts by sb->s_blocksize_bits - looks
 * like a bug; confirm against upstream before relying on it.
 */
324 if (!create
|| new_fragment
>= (current
->rlim
[RLIMIT_FSIZE
].rlim_cur
>> sb
->s_blocksize
)) {
/*
 * Choose an allocation goal near the previous pointer in this
 * indirect block, else just past the indirect block itself.
 * NOTE(review): s_fpb is added both inside the condition and again
 * when computing goal - suspicious double offset; verify.
 */
329 if (block
&& (tmp
= SWAB32(((u32
*)bh
->b_data
)[block
-1]) + uspi
->s_fpb
))
330 goal
= tmp
+ uspi
->s_fpb
;
332 goal
= bh
->b_blocknr
+ uspi
->s_fpb
;
333 tmp
= ufs_new_fragments (inode
, p
, ufs_blknum(new_fragment
), goal
, uspi
->s_fpb
, err
);
342 result
= getblk (bh
->b_dev
, tmp
+ blockoff
, sb
->s_blocksize
);
/* The indirect block now holds a new pointer: write it back. */
343 mark_buffer_dirty(bh
, 1);
344 if (IS_SYNC(inode
)) {
345 ll_rw_block (WRITE
, 1, &bh
);
348 inode
->i_ctime
= CURRENT_TIME
;
349 mark_inode_dirty(inode
);
351 UFSD(("EXIT, result %u\n", tmp
+ blockoff
))
/*
 * Public entry point: return the buffer for file fragment 'fragment',
 * allocating on demand when 'create' is set.  Mirrors ufs_bmap's level
 * walk, but chains ufs_inode_getfrag (direct / top-level indirect slot)
 * with one ufs_block_getfrag call per remaining indirection level.
 * NOTE(review): the bound check here uses '>' where ufs_bmap uses '>=' -
 * inconsistent; verify which is intended.  Several original lines
 * (declarations of f, returns) are missing from this extract.
 */
355 struct buffer_head
* ufs_getfrag (struct inode
* inode
, unsigned fragment
,
356 int create
, int * err
)
358 struct super_block
* sb
;
359 struct ufs_sb_private_info
* uspi
;
360 struct buffer_head
* bh
;
365 uspi
= sb
->u
.ufs_sb
.s_uspi
;
366 swab
= sb
->u
.ufs_sb
.s_swab
;
369 UFSD(("ENTER, ino %lu, fragment %u\n", inode
->i_ino
, fragment
))
370 if (fragment
> ((UFS_NDADDR
+ uspi
->s_apb
+ uspi
->s_2apb
+ uspi
->s_3apb
) << uspi
->s_fpbshift
)) {
371 ufs_warning (sb
, "ufs_getblk", "block > big");
/* Direct fragments. */
381 if (fragment
< UFS_NDIR_FRAGMENT
)
382 return ufs_inode_getfrag (inode
, fragment
, fragment
, create
, 1, err
);
/* Single indirect. */
386 fragment
-= UFS_NDIR_FRAGMENT
;
387 if (fragment
< (1 << (uspi
->s_apbshift
+ uspi
->s_fpbshift
))) {
388 bh
= ufs_inode_getfrag (inode
,
389 UFS_IND_FRAGMENT
+ (fragment
>> uspi
->s_apbshift
),
390 f
, create
, uspi
->s_fpb
, err
);
391 return ufs_block_getfrag (inode
, bh
,
392 fragment
& uspi
->s_apbmask
,
393 f
, create
, sb
->s_blocksize
, err
);
/* Double indirect. */
398 fragment
-= 1 << (uspi
->s_apbshift
+ uspi
->s_fpbshift
);
399 if ( fragment
< (1 << (uspi
->s_2apbshift
+ uspi
->s_fpbshift
))) {
400 bh
= ufs_inode_getfrag (inode
,
401 UFS_DIND_FRAGMENT
+ (fragment
>> uspi
->s_2apbshift
),
402 f
, create
, uspi
->s_fpb
, err
);
403 bh
= ufs_block_getfrag (inode
, bh
,
404 (fragment
>> uspi
->s_apbshift
) & uspi
->s_apbmask
,
405 f
, create
, sb
->s_blocksize
, err
);
406 return ufs_block_getfrag (inode
, bh
,
407 fragment
& uspi
->s_apbmask
,
408 f
, create
, sb
->s_blocksize
, err
);
/* Triple indirect. */
413 fragment
-= 1 << (uspi
->s_2apbshift
+ uspi
->s_fpbshift
);
414 bh
= ufs_inode_getfrag (inode
,
415 UFS_TIND_FRAGMENT
+ (fragment
>> uspi
->s_3apbshift
),
416 f
, create
, uspi
->s_fpb
, err
);
417 bh
= ufs_block_getfrag (inode
, bh
,
418 (fragment
>> uspi
->s_2apbshift
) & uspi
->s_apbmask
,
419 f
, create
, sb
->s_blocksize
, err
);
420 bh
= ufs_block_getfrag (inode
, bh
,
421 (fragment
>> uspi
->s_apbshift
) & uspi
->s_apbmask
,
422 f
, create
, sb
->s_blocksize
, err
);
423 return ufs_block_getfrag (inode
, bh
,
424 fragment
& uspi
->s_apbmask
,
425 f
, create
, sb
->s_blocksize
, err
);
/*
 * Like ufs_getfrag(), but guarantee the returned buffer's contents are
 * up to date: fetch the buffer, and if it is not already valid, issue a
 * read and re-check.
 * NOTE(review): the extract is missing lines here (returns of bh,
 * wait_on_buffer, the brelse/-EIO failure path) - code bytes kept
 * exactly as found.
 */
430 struct buffer_head
* ufs_bread (struct inode
* inode
, unsigned fragment
,
431 int create
, int * err
)
433 struct buffer_head
* bh
;
435 UFSD(("ENTER, ino %lu, fragment %u\n", inode
->i_ino
, fragment
))
436 bh
= ufs_getfrag (inode
, fragment
, create
, err
);
437 if (!bh
|| buffer_uptodate(bh
))
439 ll_rw_block (READ
, 1, &bh
);
441 if (buffer_uptodate(bh
))
/*
 * Fill an in-core inode from its on-disk struct ufs_inode: validate the
 * inode number against the cylinder-group layout, bread() the containing
 * block, then copy mode, link count, uid/gid (clamped for EFT), size,
 * timestamps, block pointers (or the inline symlink target) and the
 * Sun-specific fields, byte-swapping as dictated by s_swab.  Finally the
 * inode-operations vector is chosen from the file type.
 * NOTE(review): this extract is missing lines (braces, error returns,
 * brelse, the i_uid/i_gid clamping bodies) - code bytes kept exactly as
 * found.
 */
448 void ufs_read_inode (struct inode
* inode
)
450 struct super_block
* sb
;
451 struct ufs_sb_private_info
* uspi
;
452 struct ufs_inode
* ufs_inode
;
453 struct buffer_head
* bh
;
455 unsigned flags
, swab
;
457 UFSD(("ENTER, ino %lu\n", inode
->i_ino
))
460 uspi
= sb
->u
.ufs_sb
.s_uspi
;
461 flags
= sb
->u
.ufs_sb
.s_flags
;
462 swab
= sb
->u
.ufs_sb
.s_swab
;
/* Sanity: ino must lie between the root inode and ncg * ipg. */
464 if (inode
->i_ino
< UFS_ROOTINO
||
465 inode
->i_ino
> (uspi
->s_ncg
* uspi
->s_ipg
)) {
466 ufs_warning (sb
, "ufs_read_inode", "bad inode number (%lu)\n", inode
->i_ino
);
470 bh
= bread (sb
->s_dev
, uspi
->s_sbbase
+ ufs_inotofsba(inode
->i_ino
), sb
->s_blocksize
);
472 ufs_warning (sb
, "ufs_read_inode", "unable to read inode %lu\n", inode
->i_ino
);
475 ufs_inode
= (struct ufs_inode
*) (bh
->b_data
+ sizeof(struct ufs_inode
) * ufs_inotofsbo(inode
->i_ino
));
478 * Copy data to the in-core inode.
480 inode
->i_mode
= SWAB16(ufs_inode
->ui_mode
);
481 inode
->i_nlink
= SWAB16(ufs_inode
->ui_nlink
);
482 if (inode
->i_nlink
== 0)
483 ufs_error (sb
, "ufs_read_inode", "inode %lu has zero nlink\n", inode
->i_ino
);
486 * Linux has only 16-bit uid and gid, so we can't support EFT.
487 * Files are dynamically chown()ed to root.
489 inode
->i_uid
= inode
->u
.ufs_i
.i_uid
= ufs_get_inode_uid(ufs_inode
);
490 inode
->i_gid
= inode
->u
.ufs_i
.i_gid
= ufs_get_inode_gid(ufs_inode
);
491 if (inode
->u
.ufs_i
.i_uid
>= UFS_USEEFT
) {
494 if (inode
->u
.ufs_i
.i_gid
>= UFS_USEEFT
) {
499 * Linux i_size can be 32 on some architectures. We will mark
500 * big files as read only and let user access first 32 bits.
502 inode
->u
.ufs_i
.i_size
= SWAB64(ufs_inode
->ui_size
);
503 inode
->i_size
= (off_t
) inode
->u
.ufs_i
.i_size
;
504 if (sizeof(off_t
) == 4 && (inode
->u
.ufs_i
.i_size
>> 32))
505 inode
->i_size
= (__u32
)-1;
507 inode
->i_atime
= SWAB32(ufs_inode
->ui_atime
.tv_sec
);
508 inode
->i_ctime
= SWAB32(ufs_inode
->ui_ctime
.tv_sec
);
509 inode
->i_mtime
= SWAB32(ufs_inode
->ui_mtime
.tv_sec
);
510 inode
->i_blocks
= SWAB32(ufs_inode
->ui_blocks
);
511 inode
->i_blksize
= PAGE_SIZE
; /* This is the optimal IO size (for stat) */
512 inode
->i_version
= ++event
;
514 inode
->u
.ufs_i
.i_flags
= SWAB32(ufs_inode
->ui_flags
);
515 inode
->u
.ufs_i
.i_gen
= SWAB32(ufs_inode
->ui_gen
);
516 inode
->u
.ufs_i
.i_shadow
= SWAB32(ufs_inode
->ui_u3
.ui_sun
.ui_shadow
);
517 inode
->u
.ufs_i
.i_oeftflag
= SWAB32(ufs_inode
->ui_u3
.ui_sun
.ui_oeftflag
);
518 inode
->u
.ufs_i
.i_lastfrag
= howmany (inode
->i_size
, uspi
->s_fsize
);
/* Device nodes keep their dev number; files copy block pointers raw
 * (unswapped); block-less inodes carry an inline symlink target. */
520 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
))
522 else if (inode
->i_blocks
) {
523 for (i
= 0; i
< (UFS_NDADDR
+ UFS_NINDIR
); i
++)
524 inode
->u
.ufs_i
.i_u1
.i_data
[i
] = ufs_inode
->ui_u2
.ui_addr
.ui_db
[i
];
527 for (i
= 0; i
< (UFS_NDADDR
+ UFS_NINDIR
) * 4; i
++)
528 inode
->u
.ufs_i
.i_u1
.i_symlink
[i
] = ufs_inode
->ui_u2
.ui_symlink
[i
];
534 if (S_ISREG(inode
->i_mode
))
535 inode
->i_op
= &ufs_file_inode_operations
;
536 else if (S_ISDIR(inode
->i_mode
))
537 inode
->i_op
= &ufs_dir_inode_operations
;
538 else if (S_ISLNK(inode
->i_mode
))
539 inode
->i_op
= &ufs_symlink_inode_operations
;
541 init_special_inode(inode
, inode
->i_mode
,
542 SWAB32(ufs_inode
->ui_u2
.ui_addr
.ui_db
[0]));
546 #ifdef UFS_INODE_DEBUG_MORE
547 ufs_print_inode (inode
);
/*
 * Write the in-core inode back to its on-disk struct ufs_inode: validate
 * the inode number, bread() the containing block, copy every field out
 * (byte-swapped), then mark the buffer dirty; when do_sync is set the
 * write is issued immediately.
 * NOTE(review): the warning labels below say "ufs_read_inode" - they look
 * copy-pasted from the read path; also this bread() lacks the
 * "uspi->s_sbbase +" offset that ufs_read_inode applies - verify against
 * the full file / upstream.  Missing lines (braces, returns, brelse,
 * wait_on_buffer, declarations of i) are not visible in this extract.
 */
552 static int ufs_update_inode(struct inode
* inode
, int do_sync
)
554 struct super_block
* sb
;
555 struct ufs_sb_private_info
* uspi
;
556 struct buffer_head
* bh
;
557 struct ufs_inode
* ufs_inode
;
559 unsigned flags
, swab
;
561 UFSD(("ENTER, ino %lu\n", inode
->i_ino
))
564 uspi
= sb
->u
.ufs_sb
.s_uspi
;
565 flags
= sb
->u
.ufs_sb
.s_flags
;
566 swab
= sb
->u
.ufs_sb
.s_swab
;
568 if (inode
->i_ino
< UFS_ROOTINO
||
569 inode
->i_ino
> (uspi
->s_ncg
* uspi
->s_ipg
)) {
570 ufs_warning (sb
, "ufs_read_inode", "bad inode number (%lu)\n", inode
->i_ino
);
574 bh
= bread (sb
->s_dev
, ufs_inotofsba(inode
->i_ino
), sb
->s_blocksize
);
576 ufs_warning (sb
, "ufs_read_inode", "unable to read inode %lu\n", inode
->i_ino
);
579 ufs_inode
= (struct ufs_inode
*) (bh
->b_data
+ ufs_inotofsbo(inode
->i_ino
) * sizeof(struct ufs_inode
));
581 ufs_inode
->ui_mode
= SWAB16(inode
->i_mode
);
582 ufs_inode
->ui_nlink
= SWAB16(inode
->i_nlink
);
/* Restore the original (EFT) uid/gid if the in-core one was clamped
 * to root on read; otherwise write the current ids. */
584 if (inode
->i_uid
== 0 && inode
->u
.ufs_i
.i_uid
>= UFS_USEEFT
)
585 ufs_set_inode_uid (ufs_inode
, inode
->u
.ufs_i
.i_uid
);
587 ufs_set_inode_uid (ufs_inode
, inode
->i_uid
);
589 if (inode
->i_gid
== 0 && inode
->u
.ufs_i
.i_gid
>= UFS_USEEFT
)
590 ufs_set_inode_gid (ufs_inode
, inode
->u
.ufs_i
.i_gid
);
592 ufs_set_inode_gid (ufs_inode
, inode
->i_gid
);
594 ufs_inode
->ui_size
= SWAB64((u64
)inode
->i_size
);
595 ufs_inode
->ui_atime
.tv_sec
= SWAB32(inode
->i_atime
);
596 ufs_inode
->ui_atime
.tv_usec
= SWAB32(0);
597 ufs_inode
->ui_ctime
.tv_sec
= SWAB32(inode
->i_ctime
);
598 ufs_inode
->ui_ctime
.tv_usec
= SWAB32(0);
599 ufs_inode
->ui_mtime
.tv_sec
= SWAB32(inode
->i_mtime
);
600 ufs_inode
->ui_mtime
.tv_usec
= SWAB32(0);
601 ufs_inode
->ui_blocks
= SWAB32(inode
->i_blocks
);
602 ufs_inode
->ui_flags
= SWAB32(inode
->u
.ufs_i
.i_flags
);
603 ufs_inode
->ui_gen
= SWAB32(inode
->u
.ufs_i
.i_gen
);
/* Sun-specific shadow/oeftflag fields only exist on EFT filesystems. */
605 if ((flags
& UFS_UID_MASK
) == UFS_UID_EFT
) {
606 ufs_inode
->ui_u3
.ui_sun
.ui_shadow
= SWAB32(inode
->u
.ufs_i
.i_shadow
);
607 ufs_inode
->ui_u3
.ui_sun
.ui_oeftflag
= SWAB32(inode
->u
.ufs_i
.i_oeftflag
);
/* Mirror of ufs_read_inode: dev number, raw block pointers, or the
 * inline symlink bytes, depending on file type. */
610 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
))
611 ufs_inode
->ui_u2
.ui_addr
.ui_db
[0] = SWAB32(kdev_t_to_nr(inode
->i_rdev
));
612 else if (inode
->i_blocks
) {
613 for (i
= 0; i
< (UFS_NDADDR
+ UFS_NINDIR
); i
++)
614 ufs_inode
->ui_u2
.ui_addr
.ui_db
[i
] = inode
->u
.ufs_i
.i_u1
.i_data
[i
];
617 for (i
= 0; i
< (UFS_NDADDR
+ UFS_NINDIR
) * 4; i
++)
618 ufs_inode
->ui_u2
.ui_symlink
[i
] = inode
->u
.ufs_i
.i_u1
.i_symlink
[i
];
/* A deleted inode (nlink == 0 path) is wiped on disk. */
622 memset (ufs_inode
, 0, sizeof(struct ufs_inode
));
624 mark_buffer_dirty(bh
, 1);
626 ll_rw_block (WRITE
, 1, &bh
);
/*
 * Asynchronously write @inode back to disk: delegate to
 * ufs_update_inode() with do_sync == 0, so the buffer is only
 * marked dirty rather than written immediately.
 */
void ufs_write_inode (struct inode * inode)
{
	ufs_update_inode (inode, 0);
}
/*
 * Synchronously write @inode back to disk: delegate to
 * ufs_update_inode() with do_sync == 1 and propagate its result.
 */
int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}
645 void ufs_put_inode (struct inode
* inode
)
647 UFSD(("ENTER & EXIT\n"))
/*
 * Final removal of an unlinked inode: push the (zero-nlink) on-disk copy
 * out via ufs_update_inode(), truncate its data blocks and free the
 * inode itself.
 * NOTE(review): lines between the update and the truncate (e.g. clearing
 * i_size / checking i_blocks) are missing from this extract - code bytes
 * kept exactly as found.
 */
650 void ufs_delete_inode (struct inode
* inode
)
652 /*inode->u.ufs_i.i_dtime = CURRENT_TIME;*/
653 mark_inode_dirty(inode
);
654 ufs_update_inode(inode
, IS_SYNC(inode
));
657 ufs_truncate (inode
);
658 ufs_free_inode (inode
);