/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 *
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include "minix.h"
#include <linux/highmem.h>
#include <linux/smp_lock.h>

typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

static int minix_readdir(struct file *, void *, filldir_t);

const struct file_operations minix_dir_operations = {
        .read           = generic_read_dir,
        .readdir        = minix_readdir,
        .fsync          = minix_sync_file,
};
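
/*
 * Undo dir_get_page(): drop the kmap() mapping and release the page
 * cache reference.
 */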
static inline void dir_put_page(struct page *page)
{
        kunmap(page);
        page_cache_release(page);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
        unsigned last_byte = PAGE_CACHE_SIZE;

        if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
                last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
        return last_byte;
}
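
/*
 * Number of page cache pages needed to hold the directory contents.
 */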
static inline unsigned long dir_pages(struct inode *inode)
{
        return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
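
/*
 * Finish a write into a directory page.  Synchronous directories are
 * written out immediately; otherwise the page is just unlocked and
 * left for normal writeback.
 */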
static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
{
        struct inode *dir = (struct inode *)page->mapping->host;
        int err = 0;
        page->mapping->a_ops->commit_write(NULL, page, from, to);
        if (IS_DIRSYNC(dir))
                err = write_one_page(page, 1);
        else
                unlock_page(page);
        return err;
}
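
/*
 * Read directory page 'n' through the page cache and kmap() it.
 * Returns an ERR_PTR on failure; a successful call must be balanced
 * by dir_put_page().
 */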
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
        struct address_space *mapping = dir->i_mapping;
        struct page *page = read_mapping_page(mapping, n, NULL);
        if (!IS_ERR(page)) {
                kmap(page);
                if (!PageUptodate(page))
                        goto fail;
        }
        return page;

fail:
        dir_put_page(page);
        return ERR_PTR(-EIO);
}
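
/* Advance to the next directory entry; entries are s_dirsize bytes apart. */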
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
        return (void*)((char*)de + sbi->s_dirsize);
}
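
/*
 * readdir: walk the directory pages and pass every in-use entry to
 * filldir(), encoding the position as (page number, offset in page).
 */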
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
        unsigned long pos = filp->f_pos;
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        unsigned offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
        struct minix_sb_info *sbi = minix_sb(sb);
        unsigned chunk_size = sbi->s_dirsize;
        char *name;
        __u32 inumber;

        lock_kernel();

        pos = (pos + chunk_size-1) & ~(chunk_size-1);
        if (pos >= inode->i_size)
                goto done;

        for ( ; n < npages; n++, offset = 0) {
                char *p, *kaddr, *limit;
                struct page *page = dir_get_page(inode, n);

                if (IS_ERR(page))
                        continue;
                kaddr = (char *)page_address(page);
                p = kaddr+offset;
                limit = kaddr + minix_last_byte(inode, n) - chunk_size;
                for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
                        if (sbi->s_version == MINIX_V3) {
                                minix3_dirent *de3 = (minix3_dirent *)p;
                                name = de3->name;
                                inumber = de3->inode;
                        } else {
                                minix_dirent *de = (minix_dirent *)p;
                                name = de->name;
                                inumber = de->inode;
                        }
                        if (inumber) {
                                int over;

                                unsigned l = strnlen(name, sbi->s_namelen);
                                offset = p - kaddr;
                                over = filldir(dirent, name, l,
                                        (n << PAGE_CACHE_SHIFT) | offset,
                                        inumber, DT_UNKNOWN);
                                if (over) {
                                        dir_put_page(page);
                                        goto done;
                                }
                        }
                }
                dir_put_page(page);
        }

done:
        filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
        unlock_kernel();
        return 0;
}
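
/*
 * Compare a name of length 'len' against an on-disk name field of at
 * most 'maxlen' bytes (only NUL-terminated when shorter than maxlen).
 */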
static inline int namecompare(int len, int maxlen,
        const char * name, const char * buffer)
{
        if (len < maxlen && buffer[len])
                return 0;
        return !memcmp(name, buffer, len);
}

/*
 *      minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
        const char * name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        struct inode * dir = dentry->d_parent->d_inode;
        struct super_block * sb = dir->i_sb;
        struct minix_sb_info * sbi = minix_sb(sb);
        unsigned long n;
        unsigned long npages = dir_pages(dir);
        struct page *page = NULL;
        char *p;

        char *namx;
        __u32 inumber;
        *res_page = NULL;

        for (n = 0; n < npages; n++) {
                char *kaddr, *limit;

                page = dir_get_page(dir, n);
                if (IS_ERR(page))
                        continue;

                kaddr = (char*)page_address(page);
                limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
                for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
                        if (sbi->s_version == MINIX_V3) {
                                minix3_dirent *de3 = (minix3_dirent *)p;
                                namx = de3->name;
                                inumber = de3->inode;
                        } else {
                                minix_dirent *de = (minix_dirent *)p;
                                namx = de->name;
                                inumber = de->inode;
                        }
                        if (!inumber)
                                continue;
                        if (namecompare(namelen, sbi->s_namelen, name, namx))
                                goto found;
                }
                dir_put_page(page);
        }
        return NULL;

found:
        *res_page = page;
        return (minix_dirent *)p;
}
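
/*
 * Add 'inode' to the directory under the name carried by 'dentry'.
 * Reuses a free slot when one exists, otherwise appends past i_size
 * (growing the directory); fails with -EEXIST if the name is already
 * present.
 */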
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
        struct inode *dir = dentry->d_parent->d_inode;
        const char * name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        struct super_block * sb = dir->i_sb;
        struct minix_sb_info * sbi = minix_sb(sb);
        struct page *page = NULL;
        unsigned long npages = dir_pages(dir);
        unsigned long n;
        char *kaddr, *p;
        minix_dirent *de;
        minix3_dirent *de3;
        unsigned from, to;
        int err;
        char *namx = NULL;
        __u32 inumber;

        /*
         * We take care of directory expansion in the same loop.
         * This code plays outside i_size, so it locks the page
         * to protect that region.
         */
        for (n = 0; n <= npages; n++) {
                char *limit, *dir_end;

                page = dir_get_page(dir, n);
                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto out;
                lock_page(page);
                kaddr = (char*)page_address(page);
                dir_end = kaddr + minix_last_byte(dir, n);
                limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
                for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
                        de = (minix_dirent *)p;
                        de3 = (minix3_dirent *)p;
                        if (sbi->s_version == MINIX_V3) {
                                namx = de3->name;
                                inumber = de3->inode;
                        } else {
                                namx = de->name;
                                inumber = de->inode;
                        }
                        if (p == dir_end) {
                                /* We hit i_size */
                                if (sbi->s_version == MINIX_V3)
                                        de3->inode = 0;
                                else
                                        de->inode = 0;
                                goto got_it;
                        }
                        if (!inumber)
                                goto got_it;
                        err = -EEXIST;
                        if (namecompare(namelen, sbi->s_namelen, name, namx))
                                goto out_unlock;
                }
                unlock_page(page);
                dir_put_page(page);
        }
        BUG();
        return -EINVAL;

got_it:
        from = p - (char*)page_address(page);
        to = from + sbi->s_dirsize;
        err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
        if (err)
                goto out_unlock;
        memcpy (namx, name, namelen);
        if (sbi->s_version == MINIX_V3) {
                memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
                de3->inode = inode->i_ino;
        } else {
                memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
                de->inode = inode->i_ino;
        }
        err = dir_commit_chunk(page, from, to);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(dir);
out_put:
        dir_put_page(page);
out:
        return err;
out_unlock:
        unlock_page(page);
        goto out_put;
}
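
/*
 * Clear the inode number of a directory entry and write the change
 * back.  Releases the page.
 */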
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = (struct inode*)mapping->host;
        char *kaddr = page_address(page);
        unsigned from = (char*)de - kaddr;
        unsigned to = from + minix_sb(inode->i_sb)->s_dirsize;
        int err;

        lock_page(page);
        err = mapping->a_ops->prepare_write(NULL, page, from, to);
        if (err == 0) {
                de->inode = 0;
                err = dir_commit_chunk(page, from, to);
        } else {
                unlock_page(page);
        }
        dir_put_page(page);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
        return err;
}
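
/*
 * Fill a freshly created directory with its "." and ".." entries.
 */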
int minix_make_empty(struct inode *inode, struct inode *dir)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page = grab_cache_page(mapping, 0);
        struct minix_sb_info *sbi = minix_sb(inode->i_sb);
        char *kaddr;
        int err;

        if (!page)
                return -ENOMEM;
        err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * sbi->s_dirsize);
        if (err) {
                unlock_page(page);
                goto fail;
        }

        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr, 0, PAGE_CACHE_SIZE);

        if (sbi->s_version == MINIX_V3) {
                minix3_dirent *de3 = (minix3_dirent *)kaddr;

                de3->inode = inode->i_ino;
                strcpy(de3->name, ".");
                de3 = minix_next_entry(de3, sbi);
                de3->inode = dir->i_ino;
                strcpy(de3->name, "..");
        } else {
                minix_dirent *de = (minix_dirent *)kaddr;

                de->inode = inode->i_ino;
                strcpy(de->name, ".");
                de = minix_next_entry(de, sbi);
                de->inode = dir->i_ino;
                strcpy(de->name, "..");
        }
        kunmap_atomic(kaddr, KM_USER0);

        err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
        page_cache_release(page);
        return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
        struct page *page = NULL;
        unsigned long i, npages = dir_pages(inode);
        struct minix_sb_info *sbi = minix_sb(inode->i_sb);
        char *name;
        __u32 inumber;

        for (i = 0; i < npages; i++) {
                char *p, *kaddr, *limit;

                page = dir_get_page(inode, i);
                if (IS_ERR(page))
                        continue;

                kaddr = (char *)page_address(page);
                limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
                for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
                        if (sbi->s_version == MINIX_V3) {
                                minix3_dirent *de3 = (minix3_dirent *)p;
                                name = de3->name;
                                inumber = de3->inode;
                        } else {
                                minix_dirent *de = (minix_dirent *)p;
                                name = de->name;
                                inumber = de->inode;
                        }

                        if (inumber != 0) {
                                /* check for . and .. */
                                if (name[0] != '.')
                                        goto not_empty;
                                if (!name[1]) {
                                        if (inumber != inode->i_ino)
                                                goto not_empty;
                                } else if (name[1] != '.')
                                        goto not_empty;
                                else if (name[2])
                                        goto not_empty;
                        }
                }
                dir_put_page(page);
        }
        return 1;

not_empty:
        dir_put_page(page);
        return 0;
}

/* Points an existing entry at 'inode'.  Releases the page. */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
        struct inode *inode)
{
        struct inode *dir = (struct inode*)page->mapping->host;
        struct minix_sb_info *sbi = minix_sb(dir->i_sb);
        unsigned from = (char *)de-(char*)page_address(page);
        unsigned to = from + sbi->s_dirsize;
        int err;

        lock_page(page);
        err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
        if (err == 0) {
                de->inode = inode->i_ino;
                err = dir_commit_chunk(page, from, to);
        } else {
                unlock_page(page);
        }
        dir_put_page(page);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(dir);
}
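
/*
 * Return the ".." entry of a directory: the second slot in page 0.
 */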
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
        struct page *page = dir_get_page(dir, 0);
        struct minix_sb_info *sbi = minix_sb(dir->i_sb);
        struct minix_dir_entry *de = NULL;

        if (!IS_ERR(page)) {
                de = minix_next_entry(page_address(page), sbi);
                *p = page;
        }
        return de;
}
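
/*
 * Look up a name in the parent directory and return the inode number
 * it refers to, or 0 if the entry does not exist.
 */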
ino_t minix_inode_by_name(struct dentry *dentry)
{
        struct page *page;
        struct minix_dir_entry *de = minix_find_entry(dentry, &page);
        ino_t res = 0;

        if (de) {
                res = de->inode;
                dir_put_page(page);
        }
        return res;
}