/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.115 2004/11/22 11:07:21 dwmw2 Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @ofs: Returned value of node offset
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 * or other error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* Calculate the real dirty size.
			 * dirty_size contains blocks on the erase_pending_list;
			 * those blocks are also counted in c->nr_erasing_blocks.
			 * Once a block is actually erased, it is no longer counted
			 * as dirty_size, but it is still counted in
			 * c->nr_erasing_blocks, so we add erasing_size and subtract
			 * c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on the erasable_list are counted in dirty_size, but
			 * not in c->nr_erasing_blocks. This helps us to force GC and
			 * eventually pick a clean block, to spread the load.
			 * We add unchecked_size here, as we hopefully will find some
			 * space to use. This will affect the sum only once, as GC
			 * first finishes the checking of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
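			/* A worked example with hypothetical numbers: with sector_size
			 * 0x10000, dirty_size 0x30000 (including one block already on
			 * the erase_pending_list), erasing_size 0x10000 (one block
			 * mid-erase) and unchecked_size 0x8000, nr_erasing_blocks is 2
			 * and dirty = 0x30000 + 0x10000 - 2*0x10000 + 0x8000 = 0x28000:
			 * each block headed for erase is counted exactly once, and
			 * unchecked space is optimistically assumed reclaimable.
			 */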
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}
			/* Calculate the possibly available space. 'Possibly available'
			 * means that we don't know whether the unchecked size contains
			 * obsoleted nodes, which could give us some more usable space.
			 * This will affect the sum only once, as GC first finishes the
			 * checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is
			 * less than or equal to blocksneeded * sector_size.
			 * This prevents endless GC looping on a filesystem which is
			 * nearly full, even if the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x <= blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}
			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}
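/*
 * Typical write-path usage (a simplified sketch; the identifier names below
 * are illustrative only, not the actual callers' - see write.c for those):
 *
 *	uint32_t phys_ofs, alloclen;
 *	int ret = jffs2_reserve_space(c, write_len, &phys_ofs, &alloclen, ALLOC_NORMAL);
 *	if (ret)
 *		return ret;
 *	// ... write the node to flash at phys_ofs, build a jffs2_raw_node_ref 'raw' ...
 *	jffs2_add_physical_node_ref(c, raw);
 *	jffs2_complete_reservation(c);	// releases alloc_sem
 */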
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}
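/*
 * Note: unlike jffs2_reserve_space(), the _gc variant above neither takes the
 * alloc_sem (its caller, the garbage collector, already holds it) nor triggers
 * a further GC pass; it simply spins on jffs2_do_reserve_space() until the
 * erase machinery yields a usable block.
 */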
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;

 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			jeb = c->nextblock;
			goto restart;
		}
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;

		/* Check if we have a dirty block now, or if it was dirty already */
		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
					  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
					  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}
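	/* Note: the ISDIRTY()/VERYDIRTY() thresholds used above (believed to be
	   defined in nodelist.h in this tree) determine which list the filed
	   block lands on, and hence how eagerly the GC will pick it over. */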
	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}
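/*
 * Summary of jffs2_do_reserve_space() return values, as implemented above:
 * 0 with *ofs/*len filled in on success; -EAGAIN when the caller should retry
 * because the block lists may have changed underneath us (wbuf flushed, erase
 * completed or failed); -ENOSPC only when even triggering erases cannot
 * produce usable space.
 */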
/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @new: new node reference to add
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	ACCT_SANITY_CHECK(c,jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));

	spin_unlock(&c->erase_completion_lock);

	return 0;
}
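/*
 * A note on accounting: the ACCT_SANITY_CHECK/ACCT_PARANOIA_CHECK macros used
 * above are understood to verify the invariant that, per eraseblock,
 * free_size + dirty_size + used_size + wasted_size + unchecked_size adds up
 * to sector_size, with the per-block counters mirrored by the superblock-wide
 * totals that this file keeps adjusting in lock-step.
 */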
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}
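/* on_list() is a plain O(n) membership test; its only user below is the
   bad_used_list check in jffs2_mark_node_obsolete(), so the linear walk is
   paid only when converting wasted space on an obsoleted node's block. */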
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if(!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & JFFS2_SB_FLAG_MOUNTING)) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);
	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}
	// Take care that wasted size is taken into account
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
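	/* ref->flash_offset doubles as storage for the REF_* state: node
	   offsets are PAD()ed to a multiple of 4, so the low bits are free to
	   hold flags, which ref_offset()/ref_flags() mask apart. ORing in
	   REF_OBSOLETE here therefore updates the state without disturbing
	   the offset. */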
	ACCT_SANITY_CHECK(c, jeb);

	D1(ACCT_PARANOIA_CHECK(jeb));

	if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
		/* Mount in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}
	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c)) {
		/* We didn't lock the erase_free_sem */
		return;
	}
	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
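	/* Note that this "obliteration" only clears the JFFS2_NODE_ACCURATE
	   bit, i.e. it flips 1-bits to 0, which NOR flash allows in place
	   without an erase cycle. That is precisely the capability that
	   jffs2_can_mark_obsolete() checked for before we took this path. */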
	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic) {
			D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
			jffs2_del_ino_cache(c, ic);
			jffs2_free_inode_cache(ic);
		}

		spin_unlock(&c->erase_completion_lock);
	}
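	/* The next_in_ino chain walked above is terminated not by NULL but by
	   a pointer back to the owning jffs2_inode_cache itself (which is what
	   jffs2_raw_ref_to_ic() relies on), so an emptied list is recognised
	   by ic->nodes == (void *)ic rather than by a NULL check. */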
	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n) jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}
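	/* Merging adjacent obsolete refs like this frees a jffs2_raw_node_ref,
	   a worthwhile memory saving on large filesystems, and coalesces the
	   dirty range into a single ref without changing any size totals. */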
	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node ) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}
#if CONFIG_JFFS2_FS_DEBUG >= 2
void jffs2_dump_block_lists(struct jffs2_sb_info *c)
{
	printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
	printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
	printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
	printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
	printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
	printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
	printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
	printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
	printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
	printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
	printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n", c->sector_size * c->resv_blocks_write);

	if (c->nextblock) {
		printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
	} else {
		printk(KERN_DEBUG "nextblock: NULL\n");
	}
	if (c->gcblock) {
		printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
	} else {
		printk(KERN_DEBUG "gcblock: NULL\n");
	}
	if (list_empty(&c->clean_list)) {
		printk(KERN_DEBUG "clean_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->clean_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks ++;
			dirty += jeb->wasted_size;
			printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->very_dirty_list)) {
		printk(KERN_DEBUG "very_dirty_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->very_dirty_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks ++;
			dirty += jeb->dirty_size;
			printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
			numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->dirty_list)) {
		printk(KERN_DEBUG "dirty_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->dirty_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks ++;
			dirty += jeb->dirty_size;
			printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
			numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->erasable_list)) {
		printk(KERN_DEBUG "erasable_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasable_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erasing_list)) {
		printk(KERN_DEBUG "erasing_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasing_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erase_pending_list)) {
		printk(KERN_DEBUG "erase_pending_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erase_pending_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erasable_pending_wbuf_list)) {
		printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasable_pending_wbuf_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->free_list)) {
		printk(KERN_DEBUG "free_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->free_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->bad_list)) {
		printk(KERN_DEBUG "bad_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->bad_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->bad_used_list)) {
		printk(KERN_DEBUG "bad_used_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->bad_used_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
}
#endif /* CONFIG_JFFS2_FS_DEBUG */
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on the erase_pending_list;
	 * those blocks are also counted in c->nr_erasing_blocks.
	 * Once a block is actually erased, it is no longer counted as
	 * dirty_size, but it is still counted in c->nr_erasing_blocks, so we
	 * add erasing_size and subtract c->nr_erasing_blocks * c->sector_size
	 * again.
	 * Blocks on the erasable_list are counted in dirty_size, but not in
	 * c->nr_erasing_blocks. This helps us to force GC and eventually pick
	 * a clean block, to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}
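/*
 * jffs2_thread_should_wake() is what the background garbage-collection thread
 * (in background.c) polls to decide between running another GC pass and going
 * back to sleep: pending unchecked nodes always warrant a wakeup; otherwise we
 * wake only when free+erasing blocks have fallen below resv_blocks_gctrigger
 * and there is enough dirty space for a GC pass to be worthwhile.
 */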