[JFFS2] Reduce calls to ref_totlen() in jffs2_mark_node_obsolete()
[linux-2.6/btrfs-unstable.git] / fs/jffs2/nodemgmt.c
blob 0e1f58aa606c8fb88ec52752e5fcad4164bee8d6
1 /*
2 * JFFS2 -- Journalling Flash File System, Version 2.
4 * Copyright (C) 2001-2003 Red Hat, Inc.
6 * Created by David Woodhouse <dwmw2@infradead.org>
8 * For licensing information, see the file 'LICENCE' in this directory.
10 * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/compiler.h>
18 #include <linux/sched.h> /* For cond_resched() */
19 #include "nodelist.h"
20 #include "debug.h"
22 /**
23 * jffs2_reserve_space - request physical space to write nodes to flash
24 * @c: superblock info
25 * @minsize: Minimum acceptable size of allocation
26 * @ofs: Returned value of node offset
27 * @len: Returned value of allocation length
28 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
29 * @sumsize: summary space to reserve, or JFFS2_SUMMARY_NOSUM_SIZE for no summary
30 * Requests a block of physical space on the flash. Returns zero for success
31 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
32 * or other error if appropriate.
34 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
35 * allocation semaphore, to prevent more than one allocation from being
36 * active at any time. The semaphore is later released by jffs2_complete_reservation().
38 * jffs2_reserve_space() may trigger garbage collection in order to make room
39 * for the requested allocation.
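*
* Illustrative caller pattern (a sketch for orientation only, not code from
* this file; 'ri', 'datalen', 'flash_ofs' and 'alloclen' are hypothetical
* names, and JFFS2_SUMMARY_INODE_SIZE is assumed from the summary support):
*
*	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &flash_ofs,
*				  &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
*	if (ret)
*		return ret;
*	... write the node to flash at flash_ofs, using at most alloclen bytes ...
*	jffs2_complete_reservation(c);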
42 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
43 uint32_t *ofs, uint32_t *len, uint32_t sumsize);
45 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
46 uint32_t *len, int prio, uint32_t sumsize)
48 int ret = -EAGAIN;
49 int blocksneeded = c->resv_blocks_write;
50 /* align it */
51 minsize = PAD(minsize);
53 D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
54 down(&c->alloc_sem);
56 D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
58 spin_lock(&c->erase_completion_lock);
60 /* this needs a little more thought (true <tglx> :)) */
61 while(ret == -EAGAIN) {
62 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
63 int ret;
64 uint32_t dirty, avail;
66 /* calculate real dirty size
67 * dirty_size contains blocks on erase_pending_list
68 * those blocks are counted in c->nr_erasing_blocks.
69 * If one block is actually erased, it is no longer counted as dirty_space
70 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
71 * with c->nr_erasing_blocks * c->sector_size again.
72 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
73 * This helps us to force gc and eventually pick a clean block to spread the load.
74 * We add unchecked_size here, as we hopefully will find some space to use.
75 * This will affect the sum only once, as gc first finishes checking
76 * of nodes.
78 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
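/* Worked example with purely hypothetical numbers: sector_size 64KiB,
   dirty_size 192KiB, erasing_size 128KiB with nr_erasing_blocks == 2,
   unchecked_size 0: dirty = 192K + 128K - 2*64K + 0 = 192KiB. */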
79 if (dirty < c->nospc_dirty_size) {
80 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
81 D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
82 break;
84 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
85 dirty, c->unchecked_size, c->nospc_dirty_size));
87 spin_unlock(&c->erase_completion_lock);
88 up(&c->alloc_sem);
89 return -ENOSPC;
92 /* Calc possibly available space. Possibly available means that we
93 * don't know, if unchecked size contains obsoleted nodes, which could give us some
94 * more usable space. This will affect the sum only once, as gc first finishes checking
95 * of nodes.
96 * Return -ENOSPC if the maximum possibly available space is less than or equal to
97 * blocksneeded * sector_size.
98 * This prevents endless gc looping on a nearly-full filesystem, even if
99 * the check above passes.
101 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
102 if ( (avail / c->sector_size) <= blocksneeded) {
103 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
104 D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
105 break;
108 D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
109 avail, blocksneeded * c->sector_size));
110 spin_unlock(&c->erase_completion_lock);
111 up(&c->alloc_sem);
112 return -ENOSPC;
115 up(&c->alloc_sem);
117 D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
118 c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
119 c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
120 spin_unlock(&c->erase_completion_lock);
122 ret = jffs2_garbage_collect_pass(c);
123 if (ret)
124 return ret;
126 cond_resched();
128 if (signal_pending(current))
129 return -EINTR;
131 down(&c->alloc_sem);
132 spin_lock(&c->erase_completion_lock);
135 ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
136 if (ret) {
137 D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
140 spin_unlock(&c->erase_completion_lock);
141 if (ret)
142 up(&c->alloc_sem);
143 return ret;
146 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
147 uint32_t *len, uint32_t sumsize)
149 int ret = -EAGAIN;
150 minsize = PAD(minsize);
152 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
154 spin_lock(&c->erase_completion_lock);
155 while(ret == -EAGAIN) {
156 ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
157 if (ret) {
158 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
161 spin_unlock(&c->erase_completion_lock);
162 return ret;
166 /* Classify nextblock (clean, dirty or verydirty) and force the selection of another one */
168 static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
171 /* Check, if we have a dirty block now, or if it was dirty already */
172 if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
173 c->dirty_size += jeb->wasted_size;
174 c->wasted_size -= jeb->wasted_size;
175 jeb->dirty_size += jeb->wasted_size;
176 jeb->wasted_size = 0;
177 if (VERYDIRTY(c, jeb->dirty_size)) {
178 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
179 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
180 list_add_tail(&jeb->list, &c->very_dirty_list);
181 } else {
182 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
183 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
184 list_add_tail(&jeb->list, &c->dirty_list);
186 } else {
187 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
188 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
189 list_add_tail(&jeb->list, &c->clean_list);
191 c->nextblock = NULL;
195 /* Select a new jeb for nextblock */
197 static int jffs2_find_nextblock(struct jffs2_sb_info *c)
199 struct list_head *next;
201 /* Take the next block off the 'free' list */
203 if (list_empty(&c->free_list)) {
205 if (!c->nr_erasing_blocks &&
206 !list_empty(&c->erasable_list)) {
207 struct jffs2_eraseblock *ejeb;
209 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
210 list_del(&ejeb->list);
211 list_add_tail(&ejeb->list, &c->erase_pending_list);
212 c->nr_erasing_blocks++;
213 jffs2_erase_pending_trigger(c);
214 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
215 ejeb->offset));
218 if (!c->nr_erasing_blocks &&
219 !list_empty(&c->erasable_pending_wbuf_list)) {
220 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
221 /* c->nextblock is NULL, no update to c->nextblock allowed */
222 spin_unlock(&c->erase_completion_lock);
223 jffs2_flush_wbuf_pad(c);
224 spin_lock(&c->erase_completion_lock);
225 /* Have another go. It'll be on the erasable_list now */
226 return -EAGAIN;
229 if (!c->nr_erasing_blocks) {
230 /* Ouch. We're in GC, or we wouldn't have got here.
231 And there's no space left. At all. */
232 printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
233 c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
234 list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
235 return -ENOSPC;
238 spin_unlock(&c->erase_completion_lock);
239 /* Don't wait for it; just erase one right now */
240 jffs2_erase_pending_blocks(c, 1);
241 spin_lock(&c->erase_completion_lock);
243 /* An erase may have failed, decreasing the
244 amount of free space available. So we must
245 restart from the beginning */
246 return -EAGAIN;
249 next = c->free_list.next;
250 list_del(next);
251 c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
252 c->nr_free_blocks--;
254 jffs2_sum_reset_collected(c->summary); /* reset collected summary */
256 D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
258 return 0;
261 /* Called with alloc sem _and_ erase_completion_lock */
262 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
264 struct jffs2_eraseblock *jeb = c->nextblock;
265 uint32_t reserved_size; /* for summary information at the end of the jeb */
266 int ret;
268 restart:
269 reserved_size = 0;
271 if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
272 /* NOSUM_SIZE means not to generate summary */
274 if (jeb) {
275 reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
276 dbg_summary("minsize=%d , jeb->free=%d ,"
277 "summary->size=%d , sumsize=%d\n",
278 minsize, jeb->free_size,
279 c->summary->sum_size, sumsize);
282 /* Is there enough space for writing out the current node, or do we have to
283 write out the summary information now, close this jeb and select a new nextblock? */
284 if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
285 JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
287 /* Has summary been disabled for this jeb? */
288 if (jffs2_sum_is_disabled(c->summary)) {
289 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
290 goto restart;
293 /* Writing out the collected summary information */
294 dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
295 ret = jffs2_sum_write_sumnode(c);
297 if (ret)
298 return ret;
300 if (jffs2_sum_is_disabled(c->summary)) {
301 /* jffs2_sum_write_sumnode() couldn't write out the summary information;
302 disabling summary for this jeb and freeing the collected information
304 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
305 goto restart;
308 jffs2_close_nextblock(c, jeb);
309 jeb = NULL;
310 /* always keep a valid value in reserved_size */
311 reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
313 } else {
314 if (jeb && minsize > jeb->free_size) {
315 /* Skip the end of this block and file it as having some dirty space */
316 /* If there's a pending write to it, flush now */
318 if (jffs2_wbuf_dirty(c)) {
319 spin_unlock(&c->erase_completion_lock);
320 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
321 jffs2_flush_wbuf_pad(c);
322 spin_lock(&c->erase_completion_lock);
323 jeb = c->nextblock;
324 goto restart;
327 c->wasted_size += jeb->free_size;
328 c->free_size -= jeb->free_size;
329 jeb->wasted_size += jeb->free_size;
330 jeb->free_size = 0;
332 jffs2_close_nextblock(c, jeb);
333 jeb = NULL;
337 if (!jeb) {
339 ret = jffs2_find_nextblock(c);
340 if (ret)
341 return ret;
343 jeb = c->nextblock;
345 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
346 printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
347 goto restart;
350 /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
351 enough space */
352 *ofs = jeb->offset + (c->sector_size - jeb->free_size);
353 *len = jeb->free_size - reserved_size;
355 if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
356 !jeb->first_node->next_in_ino) {
357 /* The only node in it beforehand was a CLEANMARKER node (we think).
358 So mark it obsolete now that there's going to be another node
359 in the block. This will reduce used_size to zero, but we've
360 already set c->nextblock so that jffs2_mark_node_obsolete()
361 won't try to refile it to the dirty_list.
363 spin_unlock(&c->erase_completion_lock);
364 jffs2_mark_node_obsolete(c, jeb->first_node);
365 spin_lock(&c->erase_completion_lock);
368 D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
369 return 0;
373 * jffs2_add_physical_node_ref - add a physical node reference to the list
374 * @c: superblock info
375 * @new: new node reference to add
377 * The node's length is derived from the reference itself via ref_totlen().
379 * Should only be used to report nodes for which space has been allocated
380 * by jffs2_reserve_space.
382 * Must be called with the alloc_sem held.
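*
* Illustrative sequence (a sketch only; 'raw', 'flash_ofs', 'ri' and 'datalen'
* are hypothetical names, jffs2_alloc_raw_node_ref() is assumed from the jffs2
* allocator code, and the flash write step is elided):
*
*	raw = jffs2_alloc_raw_node_ref();
*	raw->flash_offset = flash_ofs;	(the offset returned by jffs2_reserve_space())
*	raw->__totlen = PAD(sizeof(ri) + datalen);
*	raw->next_phys = NULL;
*	... write the node itself to flash ...
*	jffs2_add_physical_node_ref(c, raw);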
385 int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
387 struct jffs2_eraseblock *jeb;
388 uint32_t len;
390 jeb = &c->blocks[new->flash_offset / c->sector_size];
391 len = ref_totlen(c, jeb, new);
393 D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
394 #if 1
395 /* we could get some obsolete nodes after nextblock was refiled
396 in wbuf.c */
397 if ((c->nextblock || !ref_obsolete(new))
398 &&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
399 printk(KERN_WARNING "argh. node added in wrong place\n");
400 jffs2_free_raw_node_ref(new);
401 return -EINVAL;
403 #endif
404 spin_lock(&c->erase_completion_lock);
406 if (!jeb->first_node)
407 jeb->first_node = new;
408 if (jeb->last_node)
409 jeb->last_node->next_phys = new;
410 jeb->last_node = new;
412 jeb->free_size -= len;
413 c->free_size -= len;
414 if (ref_obsolete(new)) {
415 jeb->dirty_size += len;
416 c->dirty_size += len;
417 } else {
418 jeb->used_size += len;
419 c->used_size += len;
422 if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
423 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
424 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
425 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
426 if (jffs2_wbuf_dirty(c)) {
427 /* Flush the last write in the block if it's outstanding */
428 spin_unlock(&c->erase_completion_lock);
429 jffs2_flush_wbuf_pad(c);
430 spin_lock(&c->erase_completion_lock);
433 list_add_tail(&jeb->list, &c->clean_list);
434 c->nextblock = NULL;
436 jffs2_dbg_acct_sanity_check_nolock(c,jeb);
437 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
439 spin_unlock(&c->erase_completion_lock);
441 return 0;
445 void jffs2_complete_reservation(struct jffs2_sb_info *c)
447 D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
448 jffs2_garbage_collect_trigger(c);
449 up(&c->alloc_sem);
452 static inline int on_list(struct list_head *obj, struct list_head *head)
454 struct list_head *this;
456 list_for_each(this, head) {
457 if (this == obj) {
458 D1(printk("%p is on list at %p\n", obj, head));
459 return 1;
463 return 0;
466 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
468 struct jffs2_eraseblock *jeb;
469 int blocknr;
470 struct jffs2_unknown_node n;
471 int ret, addedsize;
472 size_t retlen;
473 uint32_t freed_len;
475 if(!ref) {
476 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
477 return;
479 if (ref_obsolete(ref)) {
480 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
481 return;
483 blocknr = ref->flash_offset / c->sector_size;
484 if (blocknr >= c->nr_blocks) {
485 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
486 BUG();
488 jeb = &c->blocks[blocknr];
490 if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
491 !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
492 /* Hm. This may confuse static lock analysis. If any of the above
493 three conditions is false, we're going to return from this
494 function without actually obliterating any nodes or freeing
495 any jffs2_raw_node_refs. So we don't need to stop erases from
496 happening, or protect against people holding an obsolete
497 jffs2_raw_node_ref without the erase_completion_lock. */
498 down(&c->erase_free_sem);
501 spin_lock(&c->erase_completion_lock);
503 freed_len = ref_totlen(c, jeb, ref);
505 if (ref_flags(ref) == REF_UNCHECKED) {
506 D1(if (unlikely(jeb->unchecked_size < freed_len)) {
507 printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
508 freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
509 BUG();
511 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
512 jeb->unchecked_size -= freed_len;
513 c->unchecked_size -= freed_len;
514 } else {
515 D1(if (unlikely(jeb->used_size < freed_len)) {
516 printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
517 freed_len, blocknr, ref->flash_offset, jeb->used_size);
518 BUG();
520 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
521 jeb->used_size -= freed_len;
522 c->used_size -= freed_len;
525 // Take care that wasted size is taken into account
526 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
527 D1(printk(KERN_DEBUG "Dirtying\n"));
528 addedsize = freed_len;
529 jeb->dirty_size += freed_len;
530 c->dirty_size += freed_len;
532 /* Convert wasted space to dirty, if not a bad block */
533 if (jeb->wasted_size) {
534 if (on_list(&jeb->list, &c->bad_used_list)) {
535 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
536 jeb->offset));
537 addedsize = 0; /* To fool the refiling code later */
538 } else {
539 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
540 jeb->wasted_size, jeb->offset));
541 addedsize += jeb->wasted_size;
542 jeb->dirty_size += jeb->wasted_size;
543 c->dirty_size += jeb->wasted_size;
544 c->wasted_size -= jeb->wasted_size;
545 jeb->wasted_size = 0;
548 } else {
549 D1(printk(KERN_DEBUG "Wasting\n"));
550 addedsize = 0;
551 jeb->wasted_size += freed_len;
552 c->wasted_size += freed_len;
554 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
556 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
557 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
559 if (c->flags & JFFS2_SB_FLAG_SCANNING) {
560 /* Flash scanning is in progress. Don't muck about with the block
561 lists because they're not ready yet, and don't actually
562 obliterate nodes that look obsolete. If they weren't
563 marked obsolete on the flash at the time they _became_
564 obsolete, there was probably a reason for that. */
565 spin_unlock(&c->erase_completion_lock);
566 /* We didn't lock the erase_free_sem */
567 return;
570 if (jeb == c->nextblock) {
571 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
572 } else if (!jeb->used_size && !jeb->unchecked_size) {
573 if (jeb == c->gcblock) {
574 D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
575 c->gcblock = NULL;
576 } else {
577 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
578 list_del(&jeb->list);
580 if (jffs2_wbuf_dirty(c)) {
581 D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
582 list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
583 } else {
584 if (jiffies & 127) {
585 /* Most of the time, we just erase it immediately. Otherwise we
586 spend ages scanning it on mount, etc. */
587 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
588 list_add_tail(&jeb->list, &c->erase_pending_list);
589 c->nr_erasing_blocks++;
590 jffs2_erase_pending_trigger(c);
591 } else {
592 /* Sometimes, however, we leave it elsewhere so it doesn't get
593 immediately reused, and we spread the load a bit. */
594 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
595 list_add_tail(&jeb->list, &c->erasable_list);
598 D1(printk(KERN_DEBUG "Done OK\n"));
599 } else if (jeb == c->gcblock) {
600 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
601 } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
602 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
603 list_del(&jeb->list);
604 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
605 list_add_tail(&jeb->list, &c->dirty_list);
606 } else if (VERYDIRTY(c, jeb->dirty_size) &&
607 !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
608 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
609 list_del(&jeb->list);
610 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
611 list_add_tail(&jeb->list, &c->very_dirty_list);
612 } else {
613 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
614 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
617 spin_unlock(&c->erase_completion_lock);
619 if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
620 (c->flags & JFFS2_SB_FLAG_BUILDING)) {
621 /* We didn't lock the erase_free_sem */
622 return;
625 /* The erase_free_sem is locked, and has been since before we marked the node obsolete
626 and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
627 the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
628 by jffs2_free_all_node_refs() in erase.c. Which is nice. */
630 D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
631 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
632 if (ret) {
633 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
634 goto out_erase_sem;
636 if (retlen != sizeof(n)) {
637 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
638 goto out_erase_sem;
640 if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
641 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
642 goto out_erase_sem;
644 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
645 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
646 goto out_erase_sem;
648 /* XXX FIXME: This is ugly now */
649 n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
650 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
651 if (ret) {
652 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
653 goto out_erase_sem;
655 if (retlen != sizeof(n)) {
656 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
657 goto out_erase_sem;
660 /* Nodes which have been marked obsolete no longer need to be
661 associated with any inode. Remove them from the per-inode list.
663 Note we can't do this for NAND at the moment because we need
664 obsolete dirent nodes to stay on the lists, because of the
665 horridness in jffs2_garbage_collect_deletion_dirent(). Also
666 because we delete the inocache, and on NAND we need that to
667 stay around until all the nodes are actually erased, in order
668 to stop us from giving the same inode number to another newly
669 created inode. */
670 if (ref->next_in_ino) {
671 struct jffs2_inode_cache *ic;
672 struct jffs2_raw_node_ref **p;
674 spin_lock(&c->erase_completion_lock);
676 ic = jffs2_raw_ref_to_ic(ref);
677 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
678 ;
680 *p = ref->next_in_ino;
681 ref->next_in_ino = NULL;
683 if (ic->nodes == (void *)ic && ic->nlink == 0)
684 jffs2_del_ino_cache(c, ic);
686 spin_unlock(&c->erase_completion_lock);
690 /* Merge with the next node in the physical list, if there is one
691 and if it's also obsolete and if it doesn't belong to any inode */
692 if (ref->next_phys && ref_obsolete(ref->next_phys) &&
693 !ref->next_phys->next_in_ino) {
694 struct jffs2_raw_node_ref *n = ref->next_phys;
696 spin_lock(&c->erase_completion_lock);
698 ref->__totlen += n->__totlen;
699 ref->next_phys = n->next_phys;
700 if (jeb->last_node == n) jeb->last_node = ref;
701 if (jeb->gc_node == n) {
702 /* gc will be happy continuing gc on this node */
703 jeb->gc_node=ref;
705 spin_unlock(&c->erase_completion_lock);
707 jffs2_free_raw_node_ref(n);
710 /* Also merge with the previous node in the list, if there is one
711 and that one is obsolete */
712 if (ref != jeb->first_node ) {
713 struct jffs2_raw_node_ref *p = jeb->first_node;
715 spin_lock(&c->erase_completion_lock);
717 while (p->next_phys != ref)
718 p = p->next_phys;
720 if (ref_obsolete(p) && !ref->next_in_ino) {
721 p->__totlen += ref->__totlen;
722 if (jeb->last_node == ref) {
723 jeb->last_node = p;
725 if (jeb->gc_node == ref) {
726 /* gc will be happy continuing gc on this node */
727 jeb->gc_node=p;
729 p->next_phys = ref->next_phys;
730 jffs2_free_raw_node_ref(ref);
732 spin_unlock(&c->erase_completion_lock);
734 out_erase_sem:
735 up(&c->erase_free_sem);
738 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
740 int ret = 0;
741 uint32_t dirty;
743 if (c->unchecked_size) {
744 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
745 c->unchecked_size, c->checked_ino));
746 return 1;
749 /* dirty_size contains blocks on erase_pending_list
750 * those blocks are counted in c->nr_erasing_blocks.
751 * If one block is actually erased, it is no longer counted as dirty_space
752 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
753 * with c->nr_erasing_blocks * c->sector_size again.
754 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
755 * This helps us to force gc and eventually pick a clean block to spread the load.
757 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
759 if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
760 (dirty > c->nospc_dirty_size))
761 ret = 1;
763 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
764 c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
766 return ret;