/*
 * linux/fs/ufs/ufs_dir.c
 *
 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
 * Laboratory for Computer Science Research Computing Facility
 * Rutgers, The State University of New Jersey
 *
 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
 *
 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
 *
 * Migration to usage of "page cache" in May 2006 by
 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/smp_lock.h>

#include "swab.h"	/* byte-order conversion helpers (fs16_to_cpu() etc.) */
#include "util.h"	/* on-disk directory entry helpers */

/*
 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
 *
 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
 */
static inline int ufs_match(struct super_block *sb, int len,
		const char * const name, struct ufs_dir_entry * de)
{
	if (len != ufs_get_de_namlen(sb, de))
		return 0;
	if (!de->d_ino)
		return 0;
	return !memcmp(name, de->d_name, len);
}

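/*
 * Push a modified byte range of a directory page through the mapping's
 * commit_write() hook; for a sync directory the page is written out
 * immediately, otherwise it is simply unlocked.
 */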
static int ufs_commit_chunk(struct page *page, unsigned from, unsigned to)
{
	struct inode *dir = page->mapping->host;
	int err = 0;

	dir->i_version++;
	page->mapping->a_ops->commit_write(NULL, page, from, to);
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}

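/* Undo ufs_get_page(): unmap the page and drop its page-cache reference. */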
static inline void ufs_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

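/* Number of page-cache pages needed to cover i_size of the directory. */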
static inline unsigned long ufs_dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

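/*
 * Return the inode number matching the name in 'dentry', or 0 if the
 * directory has no entry with that name.
 */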
ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry)
{
	ino_t res = 0;
	struct ufs_dir_entry *de;
	struct page *page;

	de = ufs_find_entry(dir, dentry, &page);
	if (de) {
		res = fs32_to_cpu(dir->i_sb, de->d_ino);
		ufs_put_page(page);
	}
	return res;
}

/* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		  struct page *page, struct inode *inode)
{
	unsigned from = (char *) de - (char *) page_address(page);
	unsigned to = from + fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	lock_page(page);
	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
	BUG_ON(err);

	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, from, to);
	ufs_put_page(page);

	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}

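/*
 * Validate every entry in a freshly read directory page: record length,
 * name length, chunk-boundary crossing and inode number range.  A clean
 * page is marked PageChecked; a corrupt one is reported via ufs_error()
 * and flagged with SetPageError as well.
 */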
static void ufs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_CACHE_SIZE;
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_CACHE_MASK;
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct ufs_dir_entry *)(kaddr + offs);
		rec_len = fs16_to_cpu(sb, p->d_reclen);

		if (rec_len < UFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
			goto Espan;
		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
						 UFS_SB(sb)->s_uspi->s_ncg))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, "ufs_check_page",
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error(sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
		  "offset=%lu, rec_len=%d, name_len=%d",
		  dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
		  rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error(sb, "ufs_check_page",
		  "entry in directory #%lu spans the page boundary, "
		  "offset=%lu",
		  dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
fail:
	SetPageChecked(page);
	SetPageError(page);
}

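/*
 * Read directory page 'n' into the page cache, map it and make sure it
 * has passed ufs_check_page().  The caller releases the page with
 * ufs_put_page().  Returns an ERR_PTR on failure.
 */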
static struct page *ufs_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_cache_page(mapping, n,
				(filler_t*)mapping->a_ops->readpage, NULL);
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		kmap(page);
		if (!PageUptodate(page))
			goto fail;
		if (!PageChecked(page))
			ufs_check_page(page);
		if (PageError(page))
			goto fail;
	}
	return page;

fail:
	ufs_put_page(page);
	return ERR_PTR(-EIO);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_CACHE_SHIFT;
	if (last_byte > PAGE_CACHE_SIZE)
		last_byte = PAGE_CACHE_SIZE;
	return last_byte;
}

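/* Step to the following directory entry using the on-disk record length. */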
static inline struct ufs_dir_entry *
ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
{
	return (struct ufs_dir_entry *)((char *)p +
					fs16_to_cpu(sb, p->d_reclen));
}

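/*
 * Return the ".." entry of a directory (the entry right after "." in
 * the first chunk) together with the page that contains it.
 */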
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
{
	struct page *page = ufs_get_page(dir, 0);
	struct ufs_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = ufs_next_entry(dir->i_sb,
				    (struct ufs_dir_entry *)page_address(page));
		*p = page;
	}
	return de;
}

/*
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_dir). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
				     struct page **res_page)
{
	struct super_block *sb = dir->i_sb;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = ufs_dir_pages(dir);
	struct page *page = NULL;
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

	UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	*res_page = NULL;

	start = ui->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ufs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct ufs_dir_entry *) kaddr;
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (de->d_reclen == 0) {
					ufs_error(dir->i_sb, __FUNCTION__,
						  "zero-length directory entry");
					ufs_put_page(page);
					goto out;
				}
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			ufs_put_page(page);
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ui->i_dir_start_lookup = n;
	return de;
}

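/*
 * Add an entry for 'dentry', pointing at 'inode', to the parent
 * directory: reuse a free or oversized record if one is found,
 * otherwise grow the directory by one chunk.
 */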
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block *sb = dir->i_sb;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = ufs_dir_pages(dir);
	unsigned long n;
	char *kaddr;
	unsigned from, to;
	int err;

	UFSD("ENTER, name %s, namelen %u\n", name, namelen);

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = ufs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += PAGE_CACHE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->d_reclen = cpu_to_fs16(sb, chunk_size);
				de->d_ino = 0;
				goto got_it;
			}
			if (de->d_reclen == 0) {
				ufs_error(dir->i_sb, __FUNCTION__,
					  "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ufs_match(sb, namelen, name, de))
				goto out_unlock;
			name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
			rec_len = fs16_to_cpu(sb, de->d_reclen);
			if (!de->d_ino && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ufs_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	from = (char*)de - (char*)page_address(page);
	to = from + rec_len;
	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
		struct ufs_dir_entry *de1 =
			(struct ufs_dir_entry *) ((char *) de + name_len);
		de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
		de->d_reclen = cpu_to_fs16(sb, name_len);
		de = de1;
	}

	ufs_set_de_namlen(sb, de, namelen);
	memcpy(de->d_name, name, namelen + 1);
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, from, to);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_put:
	ufs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

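/*
 * After the directory has changed under a readdir, recompute a safe
 * offset: walk forward from the start of the chunk until an entry
 * boundary at or beyond the old offset is reached.
 */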
static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
		   unsigned offset, unsigned mask)
{
	struct ufs_dir_entry *de = (struct ufs_dir_entry *)(base + offset);
	struct ufs_dir_entry *p = (struct ufs_dir_entry *)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->d_reclen == 0)
			break;
		p = ufs_next_entry(sb, p);
	}
	return (char *)p - base;
}

/*
 * This is blatantly stolen from ext2fs
 */
static int
ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	loff_t pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = ufs_dir_pages(inode);
	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	int need_revalidate = filp->f_version != inode->i_version;
	unsigned flags = UFS_SB(sb)->s_flags;

	if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct ufs_dir_entry *de;

		struct page *page = ufs_get_page(inode, n);

		if (IS_ERR(page)) {
			ufs_error(sb, __FUNCTION__,
				  "bad page in #%lu",
				  inode->i_ino);
			filp->f_pos += PAGE_CACHE_SIZE - offset;
			return -EIO;
		}
		kaddr = page_address(page);
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
				filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
			}
			filp->f_version = inode->i_version;
			need_revalidate = 0;
		}
		de = (struct ufs_dir_entry *)(kaddr+offset);
		limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
			if (de->d_reclen == 0) {
				ufs_error(sb, __FUNCTION__,
					  "zero-length directory entry");
				ufs_put_page(page);
				return -EIO;
			}
			if (de->d_ino) {
				int over;
				unsigned char d_type = DT_UNKNOWN;

				offset = (char *)de - kaddr;

				UFSD("filldir(%s,%u)\n", de->d_name,
				     fs32_to_cpu(sb, de->d_ino));
				UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;

				over = filldir(dirent, de->d_name,
					       ufs_get_de_namlen(sb, de),
					       (n<<PAGE_CACHE_SHIFT) | offset,
					       fs32_to_cpu(sb, de->d_ino), d_type);
				if (over) {
					ufs_put_page(page);
					return 0;
				}
			}
			filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
		}
		ufs_put_page(page);
	}
	return 0;
}

/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct page *page)
{
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = page->mapping;
	char *kaddr = page_address(page);
	unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
	struct ufs_dir_entry *pde = NULL;
	struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
	int err;

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	     fs32_to_cpu(sb, de->d_ino),
	     fs16_to_cpu(sb, de->d_reclen),
	     ufs_get_de_namlen(sb, de), de->d_name);

	while ((char*)de < (char*)dir) {
		if (de->d_reclen == 0) {
			ufs_error(inode->i_sb, __FUNCTION__,
				  "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = (char*)pde - (char*)page_address(page);
	lock_page(page);
	err = mapping->a_ops->prepare_write(NULL, page, from, to);
	BUG_ON(err);
	if (pde)
		pde->d_reclen = cpu_to_fs16(sb, to-from);
	dir->d_ino = 0;
	err = ufs_commit_chunk(page, from, to);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	ufs_put_page(page);
	return err;
}

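/*
 * Write the "." and ".." entries into the first chunk of a newly
 * created directory.
 */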
int ufs_make_empty(struct inode * inode, struct inode *dir)
{
	struct super_block * sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry * de;
	char *base;
	int err;

	if (!page)
		return -ENOMEM;
	kmap(page);
	err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	base = (char*)page_address(page);
	memset(base, 0, PAGE_CACHE_SIZE);

	de = (struct ufs_dir_entry *) base;

	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
	ufs_set_de_namlen(sb, de, 1);
	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
	strcpy (de->d_name, ".");
	de = (struct ufs_dir_entry *)
		((char *)de + fs16_to_cpu(sb, de->d_reclen));
	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
	ufs_set_de_type(sb, de, dir->i_mode);
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy (de->d_name, "..");

	err = ufs_commit_chunk(page, 0, chunk_size);
fail:
	kunmap(page);
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ufs_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct page *page = NULL;
	unsigned long i, npages = ufs_dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct ufs_dir_entry *de;

		page = ufs_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->d_reclen == 0) {
				ufs_error(inode->i_sb, __FUNCTION__,
					  "zero-length directory entry: "
					  "kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->d_ino) {
				u16 namelen = ufs_get_de_namlen(sb, de);
				/* check for . and .. */
				if (de->d_name[0] != '.')
					goto not_empty;
				if (namelen > 2)
					goto not_empty;
				if (namelen < 2) {
					if (inode->i_ino !=
					    fs32_to_cpu(sb, de->d_ino))
						goto not_empty;
				} else if (de->d_name[1] != '.')
					goto not_empty;
			}
			de = ufs_next_entry(sb, de);
		}
		ufs_put_page(page);
	}
	return 1;

not_empty:
	ufs_put_page(page);
	return 0;
}

const struct file_operations ufs_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= ufs_readdir,
};