/*
 *  linux/fs/ufs/inode.c
 *
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock);

static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
{
        struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
        int ptrs = uspi->s_apb;
        int ptrs_bits = uspi->s_apbshift;
        const long direct_blocks = UFS_NDADDR,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;

        UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
        if (i_block < direct_blocks) {
                offsets[n++] = i_block;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = UFS_IND_BLOCK;
                offsets[n++] = i_block;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = UFS_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = UFS_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
        } else {
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
        }
        return n;
}
/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */
static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
        int shift = uspi->s_apbshift - uspi->s_fpbshift;
        sector_t offsets[4], *p;
        int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
        u64 ret = 0L;
        __fs32 block;
        __fs64 u2_block = 0L;
        unsigned flags = UFS_SB(sb)->s_flags;
        u64 temp = 0L;

        UFSD(": frag = %llu  depth = %d\n", (unsigned long long)frag, depth);
        UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
             uspi->s_fpbshift, uspi->s_apbmask,
             (unsigned long long)mask);

        if (depth == 0)
                return 0;

        p = offsets;

        if (needs_lock)
                lock_ufs(sb);
        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;

        block = ufsi->i_u1.i_data[*p++];
        if (!block)
                goto out;

        while (--depth) {
                struct buffer_head *bh;
                sector_t n = *p++;

                bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block) + (n >> shift));
                if (!bh)
                        goto out;
                block = ((__fs32 *) bh->b_data)[n & mask];
                brelse(bh);
                if (!block)
                        goto out;
        }
        ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
        goto out;
ufs2:
        u2_block = ufsi->i_u1.u2_i_data[*p++];
        if (!u2_block)
                goto out;

        while (--depth) {
                struct buffer_head *bh;
                sector_t n = *p++;

                temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
                bh = sb_bread(sb, temp + (u64) (n >> shift));
                if (!bh)
                        goto out;
                u2_block = ((__fs64 *)bh->b_data)[n & mask];
                brelse(bh);
                if (!u2_block)
                        goto out;
        }
        temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
        ret = temp + (u64) (frag & uspi->s_fpbmask);

out:
        if (needs_lock)
                unlock_ufs(sb);
        return ret;
}
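
/*
 * Editor's note (illustrative, not from the original source): assuming
 * 8 KiB blocks split into 1 KiB fragments, uspi->s_fpbshift = 3 and
 * uspi->s_fpbmask = 7.  For frag = 21 the block path is walked for file
 * block 21 >> 3 = 2; if that block resolves to device fragment 1000,
 * the value returned is uspi->s_sbbase + 1000 + (21 & 7), i.e. the
 * fragment's offset within its block is added back at the end.
 */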
/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode - pointer to inode
 * @fragment - number of the `fragment' that holds the pointer
 *   to the newly allocated fragment(s)
 * @new_fragment - number of the newly allocated fragment(s)
 * @required - how many fragments we require
 * @err - set if something goes wrong
 * @phys - pointer to where we store the physical number of the newly
 *   allocated fragments; NULL when we are not allocating data
 *   (indirect blocks, for example)
 * @new - set if we allocate a new block
 * @locked_page - for ufs_new_fragments()
 */
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
                  sector_t new_fragment, unsigned int required, int *err,
                  long *phys, int *new, struct page *locked_page)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * result;
        unsigned blockoff, lastblockoff;
        u64 tmp, goal, lastfrag, block, lastblock;
        void *p, *p2;

        UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
             "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)new_fragment, required, !phys);

        /* TODO : to be done for write support
        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;
        */

        block = ufs_fragstoblks (fragment);
        blockoff = ufs_fragnum (fragment);
        p = ufs_get_direct_data_ptr(uspi, ufsi, block);

        goal = 0;

repeat:
        tmp = ufs_data_ptr_to_cpu(sb, p);

        lastfrag = ufsi->i_lastfrag;
        if (tmp && fragment < lastfrag) {
                if (!phys) {
                        result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                        if (tmp == ufs_data_ptr_to_cpu(sb, p)) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)tmp + blockoff);
                                return result;
                        }
                        brelse (result);
                        goto repeat;
                } else {
                        *phys = uspi->s_sbbase + tmp + blockoff;
                        return NULL;
                }
        }

        lastblock = ufs_fragstoblks (lastfrag);
        lastblockoff = ufs_fragnum (lastfrag);
        /*
         * We will extend the file into a new block beyond the last allocated block
         */
        if (lastblock < block) {
                /*
                 * We must reallocate the last allocated block
                 */
                if (lastblockoff) {
                        p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
                        tmp = ufs_new_fragments(inode, p2, lastfrag,
                                                ufs_data_ptr_to_cpu(sb, p2),
                                                uspi->s_fpb - lastblockoff,
                                                err, locked_page);
                        if (!tmp) {
                                if (lastfrag != ufsi->i_lastfrag)
                                        goto repeat;
                                else
                                        return NULL;
                        }
                        lastfrag = ufsi->i_lastfrag;
                }
                tmp = ufs_data_ptr_to_cpu(sb,
                                          ufs_get_direct_data_ptr(uspi, ufsi,
                                                                  lastblock));
                if (tmp)
                        goal = tmp + uspi->s_fpb;
                tmp = ufs_new_fragments (inode, p, fragment - blockoff,
                                         goal, required + blockoff,
                                         err,
                                         phys != NULL ? locked_page : NULL);
        } else if (lastblock == block) {
                /*
                 * We will extend the last allocated block
                 */
                tmp = ufs_new_fragments(inode, p, fragment -
                                        (blockoff - lastblockoff),
                                        ufs_data_ptr_to_cpu(sb, p),
                                        required + (blockoff - lastblockoff),
                                        err, phys != NULL ? locked_page : NULL);
        } else /* (lastblock > block) */ {
                /*
                 * We will allocate a new block before the last allocated block
                 */
                if (block) {
                        tmp = ufs_data_ptr_to_cpu(sb,
                                                  ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
                        if (tmp)
                                goal = tmp + uspi->s_fpb;
                }
                tmp = ufs_new_fragments(inode, p, fragment - blockoff,
                                        goal, uspi->s_fpb, err,
                                        phys != NULL ? locked_page : NULL);
        }
        if (!tmp) {
                if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) ||
                    (blockoff && lastfrag != ufsi->i_lastfrag))
                        goto repeat;
                *err = -ENOSPC;
                return NULL;
        }

        if (!phys) {
                result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
        } else {
                *phys = uspi->s_sbbase + tmp + blockoff;
                result = NULL;
                *err = 0;
                *new = 1;
        }

        inode->i_ctime = CURRENT_TIME_SEC;
        if (IS_SYNC(inode))
                ufs_sync_inode (inode);
        mark_inode_dirty(inode);
        UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff);
        return result;

        /* This part : To be implemented ....
           Required only for writing, not required for READ-ONLY.
        ufs2:

        u2_block = ufs_fragstoblks(fragment);
        u2_blockoff = ufs_fragnum(fragment);
        p = ufsi->i_u1.u2_i_data + block;
        goal = 0;

        repeat2:
        tmp = fs32_to_cpu(sb, *p);
        lastfrag = ufsi->i_lastfrag;
        */
}
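
/*
 * Editor's note (not from the original source): ufs_inode_getfrag()
 * above allocates through the direct data pointers kept in the inode
 * itself, while ufs_inode_getblock() below allocates through a pointer
 * that lives inside an indirect block already read into @bh.
 * ufs_getfrag_block() further down chains the two via the
 * GET_INODE_* and GET_INDIRECT_* macros.
 */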
/**
 * ufs_inode_getblock() - allocate new block
 * @inode - pointer to inode
 * @bh - pointer to the block that holds the "pointer" to the newly allocated block
 * @fragment - number of the `fragment' that holds the pointer
 *   to the newly allocated block
 * @new_fragment - number of the newly allocated fragment
 *   (the block will hold this fragment and also uspi->s_fpb-1 more)
 * @err - see ufs_inode_getfrag()
 * @phys - see ufs_inode_getfrag()
 * @new - see ufs_inode_getfrag()
 * @locked_page - see ufs_inode_getfrag()
 */
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
                   u64 fragment, sector_t new_fragment, int *err,
                   long *phys, int *new, struct page *locked_page)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * result;
        unsigned blockoff;
        u64 tmp, goal, block;
        void *p;

        block = ufs_fragstoblks (fragment);
        blockoff = ufs_fragnum (fragment);

        UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)new_fragment, !phys);

        result = NULL;
        if (!bh)
                goto out;
        if (!buffer_uptodate(bh)) {
                ll_rw_block (READ, 1, &bh);
                wait_on_buffer (bh);
                if (!buffer_uptodate(bh))
                        goto out;
        }
        if (uspi->fs_magic == UFS2_MAGIC)
                p = (__fs64 *)bh->b_data + block;
        else
                p = (__fs32 *)bh->b_data + block;
repeat:
        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (tmp) {
                if (!phys) {
                        result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                        if (tmp == ufs_data_ptr_to_cpu(sb, p))
                                goto out;
                        brelse (result);
                        goto repeat;
                } else {
                        *phys = uspi->s_sbbase + tmp + blockoff;
                        goto out;
                }
        }

        if (block && (uspi->fs_magic == UFS2_MAGIC ?
                      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
                      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
                goal = tmp + uspi->s_fpb;
        else
                goal = bh->b_blocknr + uspi->s_fpb;
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
                                uspi->s_fpb, err, locked_page);
        if (!tmp) {
                if (ufs_data_ptr_to_cpu(sb, p))
                        goto repeat;
                goto out;
        }

        if (!phys) {
                result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
        } else {
                *phys = uspi->s_sbbase + tmp + blockoff;
                *new = 1;
        }

        mark_buffer_dirty(bh);
        if (IS_SYNC(inode))
                sync_dirty_buffer(bh);
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
        UFSD("result %llu\n", (unsigned long long)tmp + blockoff);
out:
        brelse (bh);
        UFSD("EXIT\n");
        return result;
}
/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */
int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
        struct super_block * sb = inode->i_sb;
        struct ufs_sb_info * sbi = UFS_SB(sb);
        struct ufs_sb_private_info * uspi = sbi->s_uspi;
        struct buffer_head * bh;
        int ret, err, new;
        unsigned long ptr, phys;
        u64 phys64 = 0;
        bool needs_lock = (sbi->mutex_owner != current);

        if (!create) {
                phys64 = ufs_frag_map(inode, fragment, needs_lock);
                UFSD("phys64 = %llu\n", (unsigned long long)phys64);
                if (phys64)
                        map_bh(bh_result, sb, phys64);
                return 0;
        }

        /* This code entered only while writing ....? */

        err = -EIO;
        new = 0;
        ret = 0;
        bh = NULL;

        lock_ufs(sb);

        UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
        if (fragment >
            ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
             << uspi->s_fpbshift))
                goto abort_too_big;

        err = 0;
        ptr = fragment;

        /*
         * ok, these macros clean the logic up a bit and make
         * it much more readable:
         */
#define GET_INODE_DATABLOCK(x) \
        ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
                          bh_result->b_page)
#define GET_INODE_PTR(x) \
        ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
                          bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
        ufs_inode_getblock(inode, bh, x, fragment, \
                           &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
        ufs_inode_getblock(inode, bh, x, fragment, \
                           &err, NULL, NULL, NULL)
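
        /*
         * Editor's note (not from the original source): the chain below
         * works in units of fragments.  Fragments below UFS_NDIR_FRAGMENT
         * are reached through the direct pointers; the next
         * uspi->s_apb << uspi->s_fpbshift fragments go through the single
         * indirect block, followed by the doubly and triply indirect
         * ranges.  Each GET_INODE_PTR()/GET_INDIRECT_PTR() step allocates
         * or looks up one level of indirection before the final data
         * block is obtained with GET_*_DATABLOCK().
         */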
        if (ptr < UFS_NDIR_FRAGMENT) {
                bh = GET_INODE_DATABLOCK(ptr);
                goto out;
        }
        ptr -= UFS_NDIR_FRAGMENT;
        if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
                bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
                goto get_indirect;
        }
        ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
        if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
                bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
                goto get_double;
        }
        ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
        bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
        bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
        bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
        bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
        if (err)
                goto abort;
        if (new)
                set_buffer_new(bh_result);
        map_bh(bh_result, sb, phys);
abort:
        unlock_ufs(sb);

        return err;

abort_too_big:
        ufs_warning(sb, "ufs_get_block", "block > big");
        goto abort;
}
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, ufs_getfrag_block, wbc);
}
static int ufs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, ufs_getfrag_block);
}
int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
        return __block_write_begin(page, pos, len, ufs_getfrag_block);
}
static int ufs_write_begin(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned flags,
                           struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                ufs_getfrag_block);
        if (unlikely(ret)) {
                loff_t isize = mapping->host->i_size;
                if (pos + len > isize)
                        vmtruncate(mapping->host, isize);
        }

        return ret;
}
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, ufs_getfrag_block);
}
const struct address_space_operations ufs_aops = {
        .readpage = ufs_readpage,
        .writepage = ufs_writepage,
        .write_begin = ufs_write_begin,
        .write_end = generic_write_end,
        .bmap = ufs_bmap
};
static void ufs_set_inode_ops(struct inode *inode)
{
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ufs_file_inode_operations;
                inode->i_fop = &ufs_file_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ufs_dir_inode_operations;
                inode->i_fop = &ufs_dir_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (!inode->i_blocks)
                        inode->i_op = &ufs_fast_symlink_inode_operations;
                else {
                        inode->i_op = &ufs_symlink_inode_operations;
                        inode->i_mapping->a_ops = &ufs_aops;
                }
        } else
                init_special_inode(inode, inode->i_mode,
                                   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        mode_t mode;

        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
        inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
        if (inode->i_nlink == 0) {
                ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
                return -1;
        }

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
        inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);

        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
        inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
        inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
        inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
        inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
                ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
        }
        return 0;
}
static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        mode_t mode;

        UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
        inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
        if (inode->i_nlink == 0) {
                ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
                return -1;
        }

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
        inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);

        inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
        inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
        inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
        inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
        inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
        inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
        inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
        inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
        /*
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
        */

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
                       sizeof(ufs2_inode->ui_u2.ui_addr));
        } else {
                memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
                       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
                ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
        }
        return 0;
}
struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
        struct ufs_inode_info *ufsi;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * bh;
        struct inode *inode;
        int err;

        UFSD("ENTER, ino %lu\n", ino);

        if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
                            ino);
                return ERR_PTR(-EIO);
        }

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        ufsi = UFS_I(inode);

        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
                            inode->i_ino);
                goto bad_inode;
        }
        if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                err = ufs2_read_inode(inode,
                                      ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

                err = ufs1_read_inode(inode,
                                      ufs_inode + ufs_inotofsbo(inode->i_ino));
        }

        if (err)
                goto bad_inode;

        inode->i_version++;
        ufsi->i_lastfrag =
                (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
        ufsi->i_dir_start_lookup = 0;
        ufsi->i_osync = 0;

        ufs_set_inode_ops(inode);

        brelse(bh);

        UFSD("EXIT\n");
        unlock_new_inode(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        return ERR_PTR(-EIO);
}
static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);

        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

        ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
        ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);

        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
        ufs_inode->ui_atime.tv_usec = 0;
        ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
        ufs_inode->ui_ctime.tv_usec = 0;
        ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
        ufs_inode->ui_mtime.tv_usec = 0;
        ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

        if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
                ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
                ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
        }

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
        } else if (inode->i_blocks) {
                memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink));
        }

        if (!inode->i_nlink)
                memset (ufs_inode, 0, sizeof(struct ufs_inode));
}
static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);

        UFSD("ENTER\n");
        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

        ufs_inode->ui_uid = cpu_to_fs32(sb, inode->i_uid);
        ufs_inode->ui_gid = cpu_to_fs32(sb, inode->i_gid);

        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
        ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
        ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
        ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
        ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
        ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

        ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
        } else if (inode->i_blocks) {
                memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink));
        }

        if (!inode->i_nlink)
                memset (ufs_inode, 0, sizeof(struct ufs2_inode));
        UFSD("EXIT\n");
}
static int ufs_update_inode(struct inode * inode, int do_sync)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * bh;

        UFSD("ENTER, ino %lu\n", inode->i_ino);

        if (inode->i_ino < UFS_ROOTINO ||
            inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
                return -1;
        }

        bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
                return -1;
        }
        if (uspi->fs_magic == UFS2_MAGIC) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                ufs2_update_inode(inode,
                                  ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

                ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
        }

        mark_buffer_dirty(bh);
        if (do_sync)
                sync_dirty_buffer(bh);
        brelse (bh);

        UFSD("EXIT\n");
        return 0;
}
int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        lock_ufs(inode->i_sb);
        ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
        unlock_ufs(inode->i_sb);
        return ret;
}
int ufs_sync_inode (struct inode *inode)
{
        return ufs_update_inode (inode, 1);
}
void ufs_evict_inode(struct inode * inode)
{
        int want_delete = 0;

        if (!inode->i_nlink && !is_bad_inode(inode))
                want_delete = 1;

        truncate_inode_pages(&inode->i_data, 0);
        if (want_delete) {
                loff_t old_i_size;
                /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
                lock_ufs(inode->i_sb);
                mark_inode_dirty(inode);
                ufs_update_inode(inode, IS_SYNC(inode));
                old_i_size = inode->i_size;
                inode->i_size = 0;
                if (inode->i_blocks && ufs_truncate(inode, old_i_size))
                        ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n");
                unlock_ufs(inode->i_sb);
        }

        invalidate_inode_buffers(inode);
        end_writeback(inode);

        if (want_delete) {
                lock_ufs(inode->i_sb);
                ufs_free_inode (inode);
                unlock_ufs(inode->i_sb);
        }
}