MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / fs / jffs2.org / nodemgmt.c
blob8129fe2f3aa060758cd42264fc5b214f0b9a5a6d
1 /*
2 * JFFS2 -- Journalling Flash File System, Version 2.
4 * Copyright (C) 2001-2003 Red Hat, Inc.
6 * Created by David Woodhouse <dwmw2@redhat.com>
8 * For licensing information, see the file 'LICENCE' in this directory.
10 * $Id: nodemgmt.c,v 1.107 2003/11/26 15:30:58 dwmw2 Exp $
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/compiler.h>
18 #include <linux/sched.h> /* For cond_resched() */
19 #include "nodelist.h"
21 /**
22 * jffs2_reserve_space - request physical space to write nodes to flash
23 * @c: superblock info
24 * @minsize: Minimum acceptable size of allocation
25 * @ofs: Returned value of node offset
26 * @len: Returned value of allocation length
27 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
29 * Requests a block of physical space on the flash. Returns zero for success
30 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
31 * or other error if appropriate.
33 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
34 * allocation semaphore, to prevent more than one allocation from being
35 * active at any time. The semaphore is later released by jffs2_commit_allocation()
37 * jffs2_reserve_space() may trigger garbage collection in order to make room
38 * for the requested allocation.
41 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
/*
 * Reserve at least 'minsize' bytes of flash for a normal writer.
 * Takes c->alloc_sem, then loops: while there are too few free/erasing
 * blocks it drops the locks and runs garbage-collection passes; otherwise
 * it asks jffs2_do_reserve_space() for the actual allocation, retrying on
 * -EAGAIN.  On success (return 0) the caller still HOLDS c->alloc_sem
 * (released later via jffs2_complete_reservation()); on any error path
 * the semaphore is released before returning.
 */
43 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
45 int ret = -EAGAIN;
46 int blocksneeded = c->resv_blocks_write;
47 /* align it */
48 minsize = PAD(minsize);
50 D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
51 down(&c->alloc_sem);
53 D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
55 spin_lock(&c->erase_completion_lock);
57 /* this needs a little more thought (true <tglx> :)) */
58 while(ret == -EAGAIN) {
59 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
60 int ret; /* NOTE: intentionally shadows the outer 'ret'; holds only the GC-pass result below */
61 uint32_t dirty, avail;
63 /* calculate real dirty size
64 * dirty_size contains blocks on erase_pending_list
65 * those blocks are counted in c->nr_erasing_blocks.
66 * If one block is actually erased, it is no longer counted as dirty_space
67 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
68 * with c->nr_erasing_blocks * c->sector_size again.
69 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
70 * This helps us to force gc and pick eventually a clean block to spread the load.
71 * We add unchecked_size here, as we hopefully will find some space to use.
72 * This will affect the sum only once, as gc first finishes checking
73 * of nodes.
75 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
76 if (dirty < c->nospc_dirty_size) {
77 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
78 #if 0 // mask by Victor Yu. 05-08-2006
79 printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
80 #endif
81 break;
/* NOTE(review): the message text says nospc_dirty_size but the third
 * argument printed is c->sector_size — looks like a stale debug printk;
 * confirm against upstream before relying on its output. */
83 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
84 dirty, c->unchecked_size, c->sector_size));
86 spin_unlock(&c->erase_completion_lock);
87 up(&c->alloc_sem);
88 return -ENOSPC;
91 /* Calc possibly available space. Possibly available means that we
92 * don't know, if unchecked size contains obsoleted nodes, which could give us some
93 * more usable space. This will affect the sum only once, as gc first finishes checking
94 * of nodes.
95 * Return -ENOSPC, if the maximum possibly available space is less or equal than
96 * blocksneeded * sector_size.
97 * This blocks endless gc looping on a filesystem, which is nearly full, even if
98 * the check above passes.
100 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
101 if ( (avail / c->sector_size) <= blocksneeded) {
102 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
103 printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
104 break;
107 D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
108 avail, blocksneeded * c->sector_size));
109 spin_unlock(&c->erase_completion_lock);
110 up(&c->alloc_sem);
111 return -ENOSPC;
114 up(&c->alloc_sem); /* drop alloc_sem around the GC pass; GC takes it itself */
116 D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
117 c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
118 c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
119 spin_unlock(&c->erase_completion_lock);
121 ret = jffs2_garbage_collect_pass(c);
122 if (ret)
123 return ret;
125 cond_resched();
127 if (signal_pending(current))
128 return -EINTR;
130 down(&c->alloc_sem); /* re-acquire locks for the next loop iteration */
131 spin_lock(&c->erase_completion_lock);
134 ret = jffs2_do_reserve_space(c, minsize, ofs, len); /* outer 'ret' again: -EAGAIN loops */
135 if (ret) {
136 D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
139 spin_unlock(&c->erase_completion_lock);
140 if (ret)
141 up(&c->alloc_sem); /* on success the caller keeps alloc_sem */
142 return ret;
/*
 * Reserve space on behalf of the garbage collector.  Unlike
 * jffs2_reserve_space() this does NOT take c->alloc_sem (the GC caller
 * already holds it) and never triggers further GC — it simply retries
 * jffs2_do_reserve_space() while it reports -EAGAIN.
 */
145 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
147 int ret = -EAGAIN;
148 minsize = PAD(minsize);
150 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
152 spin_lock(&c->erase_completion_lock);
153 while(ret == -EAGAIN) {
154 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
155 if (ret) {
156 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
159 spin_unlock(&c->erase_completion_lock);
160 return ret;
163 /* Called with alloc sem _and_ erase_completion_lock */
/*
 * Core allocator: find (or create) c->nextblock with at least 'minsize'
 * bytes free and return the write offset/length through ofs/len.
 * Returns 0 on success, -EAGAIN when the caller should retry (an erase
 * or wbuf flush was kicked off and the world may have changed), or
 * -ENOSPC.  May temporarily drop erase_completion_lock around wbuf
 * flushes and erases; always returns with it re-held.
 */
164 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
166 struct jffs2_eraseblock *jeb = c->nextblock;
168 restart:
169 if (jeb && minsize > jeb->free_size) {
170 /* Skip the end of this block and file it as having some dirty space */
171 /* If there's a pending write to it, flush now */
172 if (jffs2_wbuf_dirty(c)) {
173 spin_unlock(&c->erase_completion_lock);
174 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
175 jffs2_flush_wbuf_pad(c);
176 spin_lock(&c->erase_completion_lock);
177 jeb = c->nextblock; /* lock was dropped: nextblock may have changed */
178 goto restart;
180 c->wasted_size += jeb->free_size;
181 c->free_size -= jeb->free_size;
182 jeb->wasted_size += jeb->free_size;
183 jeb->free_size = 0;
185 /* Check, if we have a dirty block now, or if it was dirty already */
186 if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
187 c->dirty_size += jeb->wasted_size;
188 c->wasted_size -= jeb->wasted_size;
189 jeb->dirty_size += jeb->wasted_size;
190 jeb->wasted_size = 0;
191 if (VERYDIRTY(c, jeb->dirty_size)) {
192 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
193 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
194 list_add_tail(&jeb->list, &c->very_dirty_list);
195 } else {
196 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
197 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
198 list_add_tail(&jeb->list, &c->dirty_list);
200 } else {
201 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
202 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
203 list_add_tail(&jeb->list, &c->clean_list);
205 c->nextblock = jeb = NULL; /* old block retired; pick a fresh one below */
208 if (!jeb) {
209 struct list_head *next;
210 /* Take the next block off the 'free' list */
212 if (list_empty(&c->free_list)) {
214 if (!c->nr_erasing_blocks &&
215 !list_empty(&c->erasable_list)) {
216 struct jffs2_eraseblock *ejeb;
218 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
219 list_del(&ejeb->list);
220 list_add_tail(&ejeb->list, &c->erase_pending_list);
221 c->nr_erasing_blocks++;
222 jffs2_erase_pending_trigger(c);
223 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
224 ejeb->offset));
227 if (!c->nr_erasing_blocks &&
228 !list_empty(&c->erasable_pending_wbuf_list)) {
229 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
230 /* c->nextblock is NULL, no update to c->nextblock allowed */
231 spin_unlock(&c->erase_completion_lock);
232 jffs2_flush_wbuf_pad(c);
233 spin_lock(&c->erase_completion_lock);
234 /* Have another go. It'll be on the erasable_list now */
235 return -EAGAIN;
238 if (!c->nr_erasing_blocks) {
239 /* Ouch. We're in GC, or we wouldn't have got here.
240 And there's no space left. At all. */
241 printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
242 c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
243 list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
244 return -ENOSPC;
247 spin_unlock(&c->erase_completion_lock);
248 /* Don't wait for it; just erase one right now */
249 jffs2_erase_pending_blocks(c, 1);
250 spin_lock(&c->erase_completion_lock);
252 /* An erase may have failed, decreasing the
253 amount of free space available. So we must
254 restart from the beginning */
255 return -EAGAIN;
258 next = c->free_list.next;
259 list_del(next);
260 c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
261 c->nr_free_blocks--;
263 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
264 printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
265 goto restart;
268 /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
269 enough space */
270 *ofs = jeb->offset + (c->sector_size - jeb->free_size);
271 *len = jeb->free_size;
273 if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
274 !jeb->first_node->next_in_ino) {
275 /* Only node in it beforehand was a CLEANMARKER node (we think).
276 So mark it obsolete now that there's going to be another node
277 in the block. This will reduce used_size to zero but We've
278 already set c->nextblock so that jffs2_mark_node_obsolete()
279 won't try to refile it to the dirty_list.
281 spin_unlock(&c->erase_completion_lock);
282 jffs2_mark_node_obsolete(c, jeb->first_node);
283 spin_lock(&c->erase_completion_lock);
286 D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
287 return 0;
291 * jffs2_add_physical_node_ref - add a physical node reference to the list
292 * @c: superblock info
293 * @new: new node reference to add
294 * (Note: the node's length is computed internally via ref_totlen(); this
295 * function takes only @c and @new — the old @len/@dirty parameters are gone.)
297 * Should only be used to report nodes for which space has been allocated
298 * by jffs2_reserve_space.
300 * Must be called with the alloc_sem held.
/*
 * Record a freshly written node in the per-eraseblock chain and adjust
 * the space accounting (free -> used, or free -> dirty if the node is
 * already obsolete).  Must be called with alloc_sem held; the node must
 * land exactly at c->nextblock's current write point or it is rejected
 * with -EINVAL.  If the block becomes completely full it is refiled onto
 * clean_list and c->nextblock is cleared.
 */
303 int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
305 struct jffs2_eraseblock *jeb;
306 uint32_t len;
308 jeb = &c->blocks[new->flash_offset / c->sector_size];
309 len = ref_totlen(c, jeb, new);
311 D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
312 #if 1
313 if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
314 printk(KERN_WARNING "argh. node added in wrong place\n");
315 jffs2_free_raw_node_ref(new);
316 return -EINVAL;
318 #endif
319 spin_lock(&c->erase_completion_lock);
321 if (!jeb->first_node)
322 jeb->first_node = new;
323 if (jeb->last_node)
324 jeb->last_node->next_phys = new;
325 jeb->last_node = new; /* append to the block's physical node chain */
327 jeb->free_size -= len;
328 c->free_size -= len;
329 if (ref_obsolete(new)) {
330 jeb->dirty_size += len;
331 c->dirty_size += len;
332 } else {
333 jeb->used_size += len;
334 c->used_size += len;
337 if (!jeb->free_size && !jeb->dirty_size) {
338 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
339 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
340 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
341 if (jffs2_wbuf_dirty(c)) {
342 /* Flush the last write in the block if it's outstanding */
343 spin_unlock(&c->erase_completion_lock);
344 jffs2_flush_wbuf_pad(c);
345 spin_lock(&c->erase_completion_lock);
348 list_add_tail(&jeb->list, &c->clean_list);
349 c->nextblock = NULL; /* force allocation of a fresh block next time */
351 ACCT_SANITY_CHECK(c,jeb);
352 D1(ACCT_PARANOIA_CHECK(jeb));
354 spin_unlock(&c->erase_completion_lock);
356 return 0;
/*
 * Release the reservation taken by jffs2_reserve_space(): kick the GC
 * thread if it has work to do, then drop the per-filesystem alloc_sem.
 */
360 void jffs2_complete_reservation(struct jffs2_sb_info *c)
362 D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
363 jffs2_garbage_collect_trigger(c);
364 up(&c->alloc_sem);
367 static inline int on_list(struct list_head *obj, struct list_head *head)
369 struct list_head *this;
371 list_for_each(this, head) {
372 if (this == obj) {
373 D1(printk("%p is on list at %p\n", obj, head));
374 return 1;
378 return 0;
/*
 * Mark a raw node reference obsolete: move its length from used (or
 * unchecked) space into dirty/wasted accounting, refile the eraseblock
 * onto the appropriate list (dirty, very_dirty, erasable, erase_pending,
 * ...), and — when the flash type permits — rewrite the node header on
 * the medium with the ACCURATE bit cleared.  Takes and releases
 * erase_completion_lock internally; safe to call without it held.
 */
381 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
383 struct jffs2_eraseblock *jeb;
384 int blocknr;
385 struct jffs2_unknown_node n;
386 int ret, addedsize;
387 size_t retlen;
389 if(!ref) {
390 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
391 return;
393 if (ref_obsolete(ref)) {
394 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
395 return;
397 blocknr = ref->flash_offset / c->sector_size;
398 if (blocknr >= c->nr_blocks) {
399 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
400 BUG();
402 jeb = &c->blocks[blocknr];
404 spin_lock(&c->erase_completion_lock);
406 if (ref_flags(ref) == REF_UNCHECKED) {
407 D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
/* NOTE(review): this message reports unchecked_size but prints
 * jeb->used_size — looks like a copy/paste of the used-size path below;
 * confirm against upstream before trusting the printed value. */
408 printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
409 ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
410 BUG();
412 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
413 jeb->unchecked_size -= ref_totlen(c, jeb, ref);
414 c->unchecked_size -= ref_totlen(c, jeb, ref);
415 } else {
416 D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
417 printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
418 ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
419 BUG();
421 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
422 jeb->used_size -= ref_totlen(c, jeb, ref);
423 c->used_size -= ref_totlen(c, jeb, ref);
426 // Take care, that wasted size is taken into account
427 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
428 D1(printk("Dirtying\n"));
429 addedsize = ref_totlen(c, jeb, ref); /* addedsize drives the refiling decisions below */
430 jeb->dirty_size += ref_totlen(c, jeb, ref);
431 c->dirty_size += ref_totlen(c, jeb, ref);
433 /* Convert wasted space to dirty, if not a bad block */
434 if (jeb->wasted_size) {
435 if (on_list(&jeb->list, &c->bad_used_list)) {
436 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
437 jeb->offset));
438 addedsize = 0; /* To fool the refiling code later */
439 } else {
440 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
441 jeb->wasted_size, jeb->offset));
442 addedsize += jeb->wasted_size;
443 jeb->dirty_size += jeb->wasted_size;
444 c->dirty_size += jeb->wasted_size;
445 c->wasted_size -= jeb->wasted_size;
446 jeb->wasted_size = 0;
449 } else {
450 D1(printk("Wasting\n"));
451 addedsize = 0;
452 jeb->wasted_size += ref_totlen(c, jeb, ref);
453 c->wasted_size += ref_totlen(c, jeb, ref);
455 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
457 ACCT_SANITY_CHECK(c, jeb);
459 D1(ACCT_PARANOIA_CHECK(jeb));
461 if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
462 /* Mount in progress. Don't muck about with the block
463 lists because they're not ready yet, and don't actually
464 obliterate nodes that look obsolete. If they weren't
465 marked obsolete on the flash at the time they _became_
466 obsolete, there was probably a reason for that. */
467 spin_unlock(&c->erase_completion_lock);
468 return;
471 if (jeb == c->nextblock) {
472 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
473 } else if (!jeb->used_size && !jeb->unchecked_size) {
474 if (jeb == c->gcblock) {
475 D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
476 c->gcblock = NULL;
477 } else {
478 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
479 list_del(&jeb->list);
481 if (jffs2_wbuf_dirty(c)) {
482 D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
483 list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
484 } else {
485 if (jiffies & 127) {
486 /* Most of the time, we just erase it immediately. Otherwise we
487 spend ages scanning it on mount, etc. */
488 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
489 list_add_tail(&jeb->list, &c->erase_pending_list);
490 c->nr_erasing_blocks++;
491 jffs2_erase_pending_trigger(c);
492 } else {
493 /* Sometimes, however, we leave it elsewhere so it doesn't get
494 immediately reused, and we spread the load a bit. */
495 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
496 list_add_tail(&jeb->list, &c->erasable_list);
499 D1(printk(KERN_DEBUG "Done OK\n"));
500 } else if (jeb == c->gcblock) {
501 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
502 } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
503 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
504 list_del(&jeb->list);
505 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
506 list_add_tail(&jeb->list, &c->dirty_list);
507 } else if (VERYDIRTY(c, jeb->dirty_size) &&
508 !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
509 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
510 list_del(&jeb->list);
511 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
512 list_add_tail(&jeb->list, &c->very_dirty_list);
513 } else {
514 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
515 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
518 spin_unlock(&c->erase_completion_lock);
/* Below: physically clear the ACCURATE bit on the medium, only for flash
 * types where overwriting in place is possible and the fs is writable. */
520 if (!jffs2_can_mark_obsolete(c))
521 return;
522 if (jffs2_is_readonly(c))
523 return;
525 D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
526 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
527 if (ret) {
528 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
529 return;
531 if (retlen != sizeof(n)) {
532 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
533 return;
535 if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
536 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
537 return;
539 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
540 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
541 return;
543 /* XXX FIXME: This is ugly now */
544 n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
545 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
546 if (ret) {
547 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
548 return;
550 if (retlen != sizeof(n)) {
551 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
552 return;
556 #if CONFIG_JFFS2_FS_DEBUG > 0
/*
 * Debug-only dumper: print the global space accounting counters, then
 * walk every block list (clean, very_dirty, dirty, erasable, erasing,
 * erase_pending, erasable_pending_wbuf, free, bad, bad_used) printing
 * each eraseblock's per-category sizes.  For the clean/dirty lists it
 * also reports a per-list average; the division is safe because those
 * branches only run when the list is non-empty.  No locking is taken
 * here — the caller is expected to hold the relevant locks.
 */
557 void jffs2_dump_block_lists(struct jffs2_sb_info *c)
561 printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
562 printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
563 printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
564 printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
565 printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
566 printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
567 printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
568 printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
569 printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
570 printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
571 printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
573 if (c->nextblock) {
574 printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
575 c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
576 } else {
577 printk(KERN_DEBUG "nextblock: NULL\n");
579 if (c->gcblock) {
580 printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
581 c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
582 } else {
583 printk(KERN_DEBUG "gcblock: NULL\n");
585 if (list_empty(&c->clean_list)) {
586 printk(KERN_DEBUG "clean_list: empty\n");
587 } else {
588 struct list_head *this;
589 int numblocks = 0;
590 uint32_t dirty = 0; /* for the clean list, "dirty" accumulates wasted_size */
592 list_for_each(this, &c->clean_list) {
593 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
594 numblocks ++;
595 dirty += jeb->wasted_size;
596 printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
598 printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
600 if (list_empty(&c->very_dirty_list)) {
601 printk(KERN_DEBUG "very_dirty_list: empty\n");
602 } else {
603 struct list_head *this;
604 int numblocks = 0;
605 uint32_t dirty = 0;
607 list_for_each(this, &c->very_dirty_list) {
608 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
609 numblocks ++;
610 dirty += jeb->dirty_size;
611 printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
612 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
614 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
615 numblocks, dirty, dirty / numblocks);
617 if (list_empty(&c->dirty_list)) {
618 printk(KERN_DEBUG "dirty_list: empty\n");
619 } else {
620 struct list_head *this;
621 int numblocks = 0;
622 uint32_t dirty = 0;
624 list_for_each(this, &c->dirty_list) {
625 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
626 numblocks ++;
627 dirty += jeb->dirty_size;
628 printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
629 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
631 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
632 numblocks, dirty, dirty / numblocks);
634 if (list_empty(&c->erasable_list)) {
635 printk(KERN_DEBUG "erasable_list: empty\n");
636 } else {
637 struct list_head *this;
639 list_for_each(this, &c->erasable_list) {
640 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
641 printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
642 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
645 if (list_empty(&c->erasing_list)) {
646 printk(KERN_DEBUG "erasing_list: empty\n");
647 } else {
648 struct list_head *this;
650 list_for_each(this, &c->erasing_list) {
651 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
652 printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
653 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
656 if (list_empty(&c->erase_pending_list)) {
657 printk(KERN_DEBUG "erase_pending_list: empty\n");
658 } else {
659 struct list_head *this;
661 list_for_each(this, &c->erase_pending_list) {
662 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
663 printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
664 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
667 if (list_empty(&c->erasable_pending_wbuf_list)) {
668 printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
669 } else {
670 struct list_head *this;
672 list_for_each(this, &c->erasable_pending_wbuf_list) {
673 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
674 printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
675 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
678 if (list_empty(&c->free_list)) {
679 printk(KERN_DEBUG "free_list: empty\n");
680 } else {
681 struct list_head *this;
683 list_for_each(this, &c->free_list) {
684 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
685 printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
686 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
689 if (list_empty(&c->bad_list)) {
690 printk(KERN_DEBUG "bad_list: empty\n");
691 } else {
692 struct list_head *this;
694 list_for_each(this, &c->bad_list) {
695 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
696 printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
697 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
700 if (list_empty(&c->bad_used_list)) {
701 printk(KERN_DEBUG "bad_used_list: empty\n");
702 } else {
703 struct list_head *this;
705 list_for_each(this, &c->bad_used_list) {
706 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
707 printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
708 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
712 #endif /* CONFIG_JFFS2_FS_DEBUG */
714 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
716 int ret = 0;
717 uint32_t dirty;
719 if (c->unchecked_size) {
720 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
721 c->unchecked_size, c->checked_ino));
722 return 1;
725 /* dirty_size contains blocks on erase_pending_list
726 * those blocks are counted in c->nr_erasing_blocks.
727 * If one block is actually erased, it is not longer counted as dirty_space
728 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
729 * with c->nr_erasing_blocks * c->sector_size again.
730 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
731 * This helps us to force gc and pick eventually a clean block to spread the load.
733 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
735 if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
736 (dirty > c->nospc_dirty_size))
737 ret = 1;
739 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
740 c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
742 return ret;