/*
 * linux/fs/minix/dir.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * minix directory handling functions
 *
 * Updated to filesystem version 3 by Daniel Aragones
 */
#include "minix.h"
#include <linux/highmem.h>
#include <linux/smp_lock.h>
typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

static int minix_readdir(struct file *, void *, filldir_t);
const struct file_operations minix_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= minix_readdir,
	.fsync		= minix_sync_file,
};
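/*
 * Drop the kmap and the page cache reference taken by dir_get_page().
 */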
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
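/*
 * For example, assuming 4096-byte pages: with i_size == 5000, page 0 is
 * full (result 4096) while page 1 only holds bytes 4096..4999, so the
 * result for page 1 is 5000 & 4095 == 904.
 */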
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = PAGE_CACHE_SIZE;

	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
	return last_byte;
}
static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
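/*
 * Complete a chunk write started with ->prepare_write().  For DIRSYNC
 * directories the page is written out synchronously, otherwise it is
 * simply unlocked and left for writeback.
 */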
static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
{
	struct inode *dir = (struct inode *)page->mapping->host;
	int err = 0;
	page->mapping->a_ops->commit_write(NULL, page, from, to);
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}
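/*
 * Read directory page n through the page cache and kmap() it.  The
 * caller must release the page with dir_put_page().
 */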
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		kmap(page);
		if (!PageUptodate(page))
			goto fail;
	}
	return page;

fail:
	dir_put_page(page);
	return ERR_PTR(-EIO);
}
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
	return (void*)((char*)de + sbi->s_dirsize);
}
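/*
 * readdir: walk the directory in s_dirsize chunks and hand every
 * in-use entry to filldir, resuming from f_pos.
 */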
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned long pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	char *name;
	__u32 inumber;

	lock_kernel();

	pos = (pos + chunk_size-1) & ~(chunk_size-1);
	if (pos >= inode->i_size)
		goto done;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page = dir_get_page(inode, n);

		if (IS_ERR(page))
			continue;
		kaddr = (char *)page_address(page);
		p = kaddr+offset;
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				int over;

				unsigned l = strnlen(name, sbi->s_namelen);
				offset = p - kaddr;
				over = filldir(dirent, name, l,
					(n << PAGE_CACHE_SHIFT) | offset,
					inumber, DT_UNKNOWN);
				if (over) {
					dir_put_page(page);
					goto done;
				}
			}
		}
		dir_put_page(page);
	}

done:
	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
	unlock_kernel();
	return 0;
}
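/*
 * Compare a name of length len against an on-disk entry of at most
 * maxlen bytes; shorter on-disk names are NUL-padded, so a live byte
 * at buffer[len] means the entry is longer than the name.
 */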
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}
/*
 *	minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name.  It
 * returns the page in which the entry was found (via *res_page) and a
 * pointer to the entry itself.  It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = dentry->d_parent->d_inode;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	char *p;

	char *namx;
	__u32 inumber;
	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		page = dir_get_page(dir, n);
		if (IS_ERR(page))
			continue;

		kaddr = (char*)page_address(page);
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		dir_put_page(page);
	}
	return NULL;

found:
	*res_page = page;
	return (minix_dirent *)p;
}
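/*
 * Add an entry for dentry pointing at inode.  Reuses the first free
 * slot, or extends the directory by one chunk past i_size if no free
 * slot is found.
 */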
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	minix_dirent *de;
	minix3_dirent *de3;
	unsigned from, to;
	int err;
	char *namx = NULL;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *limit, *dir_end;

		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = (char*)page_address(page);
		dir_end = kaddr + minix_last_byte(dir, n);
		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		unlock_page(page);
		dir_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	from = p - (char*)page_address(page);
	to = from + sbi->s_dirsize;
	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
	if (err)
		goto out_unlock;
	memcpy (namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	err = dir_commit_chunk(page, from, to);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_put:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
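/*
 * Clear the entry's inode number and write the chunk back.  Releases
 * the page.
 */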
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = (struct inode*)mapping->host;
	char *kaddr = page_address(page);
	unsigned from = (char*)de - kaddr;
	unsigned to = from + minix_sb(inode->i_sb)->s_dirsize;
	int err;

	lock_page(page);
	err = mapping->a_ops->prepare_write(NULL, page, from, to);
	if (err == 0) {
		de->inode = 0;
		err = dir_commit_chunk(page, from, to);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return err;
}
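/*
 * Fill a freshly created directory with its "." and ".." entries.
 */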
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * sbi->s_dirsize);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_atomic(kaddr, KM_USER0);

	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
	page_cache_release(page);
	return err;
}
/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *kaddr, *limit;

		page = dir_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = (char *)page_address(page);
		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* check for . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		dir_put_page(page);
	}
	return 1;

not_empty:
	dir_put_page(page);
	return 0;
}
/* Releases the page */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct inode *dir = (struct inode*)page->mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	unsigned from = (char *)de-(char*)page_address(page);
	unsigned to = from + sbi->s_dirsize;
	int err;

	lock_page(page);
	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
	if (err == 0) {
		de->inode = inode->i_ino;
		err = dir_commit_chunk(page, from, to);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}
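/*
 * Return the ".." entry of dir (the second slot of page 0); on success
 * *p is set to the mapped page, which the caller must release.
 */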
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = dir_get_page(dir, 0);
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	struct minix_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = minix_next_entry(page_address(page), sbi);
		*p = page;
	}
	return de;
}
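/*
 * Look up dentry's name in its parent directory and return the inode
 * number, or 0 if there is no such entry.
 */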
ino_t minix_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
	ino_t res = 0;

	if (de) {
		res = de->inode;
		dir_put_page(page);
	}
	return res;
}