[JFFS2] Fix NOR specific scan BUG
[linux-2.6/mini2440.git] fs/jffs2/build.c

/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: build.c,v 1.70 2005/02/28 08:21:05 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **);

static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
{
        for (; *i < INOCACHE_HASHSIZE; (*i)++) {
                if (c->inocache_list[*i])
                        return c->inocache_list[*i];
        }
        return NULL;
}

static inline struct jffs2_inode_cache *
next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
{
        /* More in this chain? */
        if (ic->next)
                return ic->next;
        (*i)++;
        return first_inode_chain(i, c);
}

#define for_each_inode(i, c, ic) \
        for (i = 0, ic = first_inode_chain(&i, (c)); \
             ic; \
             ic = next_inode(&i, ic, (c)))
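
/*
 * Usage sketch (illustrative): for_each_inode() visits every inocache in
 * the hash table, chain by chain. A typical caller, as in the passes
 * below, looks like:
 *
 *      int i;
 *      struct jffs2_inode_cache *ic;
 *
 *      for_each_inode(i, c, ic) {
 *              ... inspect ic->ino, ic->nlink, ic->scan_dents ...
 *      }
 */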

static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
        struct jffs2_full_dirent *fd;

        D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino));

        /* For each child, increase nlink */
        for(fd = ic->scan_dents; fd; fd = fd->next) {
                struct jffs2_inode_cache *child_ic;
                if (!fd->ino)
                        continue;

                /* XXX: Can get high latency here with huge directories */

                child_ic = jffs2_get_ino_cache(c, fd->ino);
                if (!child_ic) {
                        printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
                               fd->name, fd->ino, ic->ino);
                        jffs2_mark_node_obsolete(c, fd->raw);
                        continue;
                }

                if (child_ic->nlink++ && fd->type == DT_DIR) {
                        printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
                        if (fd->ino == 1 && ic->ino == 1) {
                                printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
                                printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
                        }
                        /* What do we do about it? */
                }
                D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
                /* Can't free them. We might need them in pass 2 */
        }
}

/* Scan plan:
 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 - Scan directory tree from top down, setting nlink in inocaches
 - Scan inocaches for inodes with nlink==0
*/
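
/*
 * (Sketch of how that plan maps onto jffs2_build_filesystem() below:
 * Pass 1 walks each directory's scan_dents and bumps the nlink of every
 * child; Pass 2/2a obsoletes the nodes of anything left with nlink == 0,
 * iterating via the dead_fds list rather than recursing; Pass 3 frees
 * the temporary scan_dents lists.)
 */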

static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
        int ret;
        int i;
        struct jffs2_inode_cache *ic;
        struct jffs2_full_dirent *fd;
        struct jffs2_full_dirent *dead_fds = NULL;

        /* First, scan the medium and build all the inode caches with
           lists of physical nodes */

        c->flags |= JFFS2_SB_FLAG_SCANNING;
        ret = jffs2_scan_medium(c);
        c->flags &= ~JFFS2_SB_FLAG_SCANNING;
        if (ret)
                goto exit;

        D1(printk(KERN_DEBUG "Scanned flash completely\n"));
        D2(jffs2_dump_block_lists(c));

        c->flags |= JFFS2_SB_FLAG_BUILDING;
        /* Now scan the directory tree, increasing nlink according to every dirent found. */
        for_each_inode(i, c, ic) {
                D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));

                D1(BUG_ON(ic->ino > c->highest_ino));

                if (ic->scan_dents) {
                        jffs2_build_inode_pass1(c, ic);
                        cond_resched();
                }
        }

        D1(printk(KERN_DEBUG "Pass 1 complete\n"));

        /* Next, scan for inodes with nlink == 0 and remove them. If
           they were directories, then decrement the nlink of their
           children too, and repeat the scan. As that's going to be
           a fairly uncommon occurrence, it's not so evil to do it this
           way. Recursion bad. */
        D1(printk(KERN_DEBUG "Pass 2 starting\n"));

        for_each_inode(i, c, ic) {
                D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
                if (ic->nlink)
                        continue;

                jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
                cond_resched();
        }

        D1(printk(KERN_DEBUG "Pass 2a starting\n"));

        while (dead_fds) {
                fd = dead_fds;
                dead_fds = fd->next;

                ic = jffs2_get_ino_cache(c, fd->ino);
                D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic));

                if (ic)
                        jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
                jffs2_free_full_dirent(fd);
        }

        D1(printk(KERN_DEBUG "Pass 2 complete\n"));

        /* Finally, we can scan again and free the dirent structs */
        for_each_inode(i, c, ic) {
                D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));

                while(ic->scan_dents) {
                        fd = ic->scan_dents;
                        ic->scan_dents = fd->next;
                        jffs2_free_full_dirent(fd);
                }
                ic->scan_dents = NULL;
                cond_resched();
        }
        c->flags &= ~JFFS2_SB_FLAG_BUILDING;

        D1(printk(KERN_DEBUG "Pass 3 complete\n"));
        D2(jffs2_dump_block_lists(c));

        /* Rotate the lists by some number to ensure wear levelling */
        jffs2_rotate_lists(c);

        ret = 0;

 exit:
        if (ret) {
                for_each_inode(i, c, ic) {
                        while(ic->scan_dents) {
                                fd = ic->scan_dents;
                                ic->scan_dents = fd->next;
                                jffs2_free_full_dirent(fd);
                        }
                }
        }

        return ret;
}

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds)
{
        struct jffs2_raw_node_ref *raw;
        struct jffs2_full_dirent *fd;

        D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));

        raw = ic->nodes;
        while (raw != (void *)ic) {
                struct jffs2_raw_node_ref *next = raw->next_in_ino;
                D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw)));
                jffs2_mark_node_obsolete(c, raw);
                raw = next;
        }

        if (ic->scan_dents) {
                int whinged = 0;
                D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino));

                while(ic->scan_dents) {
                        struct jffs2_inode_cache *child_ic;

                        fd = ic->scan_dents;
                        ic->scan_dents = fd->next;

                        if (!fd->ino) {
                                /* It's a deletion dirent. Ignore it */
                                D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name));
                                jffs2_free_full_dirent(fd);
                                continue;
                        }

                        if (!whinged) {
                                whinged = 1;
                                printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
                        }

                        D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n",
                                  fd->name, fd->ino));

                        child_ic = jffs2_get_ino_cache(c, fd->ino);
                        if (!child_ic) {
                                printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
                                jffs2_free_full_dirent(fd);
                                continue;
                        }

                        /* Reduce nlink of the child. If it's now zero, stick it on the
                           dead_fds list to be cleaned up later. Else just free the fd */

                        child_ic->nlink--;

                        if (!child_ic->nlink) {
                                D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n",
                                          fd->ino, fd->name));
                                fd->next = *dead_fds;
                                *dead_fds = fd;
                        } else {
                                D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
                                          fd->ino, fd->name, child_ic->nlink));
                                jffs2_free_full_dirent(fd);
                        }
                }
        }

        /*
           We don't delete the inocache from the hash list and free it yet.
           The erase code will do that, when all the nodes are completely gone.
        */
}

static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
{
        uint32_t size;

        /* Deletion should almost _always_ be allowed. We're fairly
           buggered once we stop allowing people to delete stuff
           because there's not enough free space... */
        c->resv_blocks_deletion = 2;

        /* Be conservative about how much space we need before we allow writes.
           On top of that which is required for deletia, require an extra 2%
           of the medium to be available, for overhead caused by nodes being
           split across blocks, etc. */

        size = c->flash_size / 50; /* 2% of flash size */
        size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
        size += c->sector_size - 1; /* ... and round up */

        c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);

        /* When do we let the GC thread run in the background */

        c->resv_blocks_gctrigger = c->resv_blocks_write + 1;

        /* When do we allow garbage collection to merge nodes to make
           long-term progress at the expense of short-term space exhaustion? */
        c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;

        /* When do we allow garbage collection to eat from bad blocks rather
           than actually making progress? */
        c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;

        /* If there's less than this amount of dirty space, don't bother
           trying to GC to make more space. It'll be a fruitless task */
        c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
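
        /*
         * Worked example (illustrative only; assumes a hypothetical 16 MiB
         * NOR part with 64 KiB eraseblocks, i.e. 256 blocks):
         *   size = 16777216/50 + 256*100 + 65535 = 426679 bytes
         *   resv_blocks_write     = 2 + 426679/65536 = 2 + 6 = 8 blocks
         *   resv_blocks_gctrigger = 9, resv_blocks_gcmerge = 3, resv_blocks_gcbad = 0
         *   nospc_dirty_size      = 65536 + 167772 = 233308 bytes (~228 KiB)
         */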

        D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
                  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks));
        D1(printk(KERN_DEBUG "Blocks required to allow deletion: %d (%d KiB)\n",
                  c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Blocks required to allow writes: %d (%d KiB)\n",
                  c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n",
                  c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Blocks required to allow GC merges: %d (%d KiB)\n",
                  c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Blocks required to GC bad blocks: %d (%d KiB)\n",
                  c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n",
                  c->nospc_dirty_size));
}

int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
        int i;

        c->free_size = c->flash_size;
        c->nr_blocks = c->flash_size / c->sector_size;
        if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
                c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks);
        else
                c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
        if (!c->blocks)
                return -ENOMEM;
        for (i=0; i<c->nr_blocks; i++) {
                INIT_LIST_HEAD(&c->blocks[i].list);
                c->blocks[i].offset = i * c->sector_size;
                c->blocks[i].free_size = c->sector_size;
                c->blocks[i].dirty_size = 0;
                c->blocks[i].wasted_size = 0;
                c->blocks[i].unchecked_size = 0;
                c->blocks[i].used_size = 0;
                c->blocks[i].first_node = NULL;
                c->blocks[i].last_node = NULL;
                c->blocks[i].bad_count = 0;
        }

        init_MUTEX(&c->alloc_sem);
        init_MUTEX(&c->erase_free_sem);
        init_waitqueue_head(&c->erase_wait);
        init_waitqueue_head(&c->inocache_wq);
        spin_lock_init(&c->erase_completion_lock);
        spin_lock_init(&c->inocache_lock);

        INIT_LIST_HEAD(&c->clean_list);
        INIT_LIST_HEAD(&c->very_dirty_list);
        INIT_LIST_HEAD(&c->dirty_list);
        INIT_LIST_HEAD(&c->erasable_list);
        INIT_LIST_HEAD(&c->erasing_list);
        INIT_LIST_HEAD(&c->erase_pending_list);
        INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
        INIT_LIST_HEAD(&c->erase_complete_list);
        INIT_LIST_HEAD(&c->free_list);
        INIT_LIST_HEAD(&c->bad_list);
        INIT_LIST_HEAD(&c->bad_used_list);

        c->highest_ino = 1;

        if (jffs2_build_filesystem(c)) {
                D1(printk(KERN_DEBUG "build_fs failed\n"));
                jffs2_free_ino_caches(c);
                jffs2_free_raw_node_refs(c);
                if (c->mtd->flags & MTD_NO_VIRTBLOCKS) {
                        vfree(c->blocks);
                } else {
                        kfree(c->blocks);
                }
                return -EIO;
        }

        jffs2_calc_trigger_levels(c);

        return 0;
}