1 /*
2 * JFFS2 -- Journalling Flash File System, Version 2.
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
10 * For licensing information, see the file 'LICENCE' in this directory.
12 * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/crc32.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/jiffies.h>
23 #include "nodelist.h"
25 /* For testing write failures */
26 #undef BREAKME
27 #undef BREAKMEHEADER
29 #ifdef BREAKME
30 static unsigned char *brokenbuf;
31 #endif
33 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
34 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
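/* Illustration: with a hypothetical wbuf_pagesize of 512, PAGE_DIV(0x21234)
   rounds the address down to the page start 0x21200, and PAGE_MOD(0x21234)
   yields 0x34, the offset within that write-buffer page. */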
36 /* max. erase failures before we mark a block bad */
37 #define MAX_ERASE_FAILURES 2
39 struct jffs2_inodirty {
40 uint32_t ino;
41 struct jffs2_inodirty *next;
44 static struct jffs2_inodirty inodirty_nomem;
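/* inodirty_nomem is a sentinel: if allocating a jffs2_inodirty fails, the
   list head is pointed at this static object so that every inode is treated
   as having data pending in the write-buffer. */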
46 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
48 struct jffs2_inodirty *this = c->wbuf_inodes;
50 /* If a malloc failed, consider _everything_ dirty */
51 if (this == &inodirty_nomem)
52 return 1;
54 /* If ino == 0, _any_ non-GC writes mean 'yes' */
55 if (this && !ino)
56 return 1;
58 /* Look to see if the inode in question is pending in the wbuf */
59 while (this) {
60 if (this->ino == ino)
61 return 1;
62 this = this->next;
64 return 0;
67 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
69 struct jffs2_inodirty *this;
71 this = c->wbuf_inodes;
73 if (this != &inodirty_nomem) {
74 while (this) {
75 struct jffs2_inodirty *next = this->next;
76 kfree(this);
77 this = next;
80 c->wbuf_inodes = NULL;
83 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
85 struct jffs2_inodirty *new;
87 /* Mark the superblock dirty so that kupdated will flush... */
88 jffs2_erase_pending_trigger(c);
90 if (jffs2_wbuf_pending_for_ino(c, ino))
91 return;
93 new = kmalloc(sizeof(*new), GFP_KERNEL);
94 if (!new) {
95 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
96 jffs2_clear_wbuf_ino_list(c);
97 c->wbuf_inodes = &inodirty_nomem;
98 return;
100 new->ino = ino;
101 new->next = c->wbuf_inodes;
102 c->wbuf_inodes = new;
103 return;
106 static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
108 struct list_head *this, *next;
109 static int n;
111 if (list_empty(&c->erasable_pending_wbuf_list))
112 return;
114 list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
115 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
117 D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
118 list_del(this);
119 if ((jiffies + (n++)) & 127) {
120 /* Most of the time, we just erase it immediately. Otherwise we
121 spend ages scanning it on mount, etc. */
122 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
123 list_add_tail(&jeb->list, &c->erase_pending_list);
124 c->nr_erasing_blocks++;
125 jffs2_erase_pending_trigger(c);
126 } else {
127 /* Sometimes, however, we leave it elsewhere so it doesn't get
128 immediately reused, and we spread the load a bit. */
129 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
130 list_add_tail(&jeb->list, &c->erasable_list);
135 #define REFILE_NOTEMPTY 0
136 #define REFILE_ANYWAY 1
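/* allow_empty semantics for jffs2_block_refile(): REFILE_NOTEMPTY means the
   block is expected to still contain nodes (we BUG() if it is empty), while
   REFILE_ANYWAY permits an empty block, which then goes straight to the
   erase_pending_list. */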
138 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
140 D1(printk("About to refile bad block at %08x\n", jeb->offset));
142 /* File the existing block on the bad_used_list.... */
143 if (c->nextblock == jeb)
144 c->nextblock = NULL;
145 else /* Not sure this should ever happen... need more coffee */
146 list_del(&jeb->list);
147 if (jeb->first_node) {
148 D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
149 list_add(&jeb->list, &c->bad_used_list);
150 } else {
151 BUG_ON(allow_empty == REFILE_NOTEMPTY);
152 /* It has to have had some nodes or we couldn't be here */
153 D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
154 list_add(&jeb->list, &c->erase_pending_list);
155 c->nr_erasing_blocks++;
156 jffs2_erase_pending_trigger(c);
159 /* Adjust its size counts accordingly */
160 c->wasted_size += jeb->free_size;
161 c->free_size -= jeb->free_size;
162 jeb->wasted_size += jeb->free_size;
163 jeb->free_size = 0;
165 jffs2_dbg_dump_block_lists_nolock(c);
166 jffs2_dbg_acct_sanity_check_nolock(c,jeb);
167 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
170 /* Recover from failure to write wbuf. Recover the nodes up to the
171 * wbuf, not the one which we were starting to try to write. */
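/* In outline: refile the failing block, skip nodes which ended before the
   wbuf or are obsolete, try to read back any data already partially on
   flash, reserve space in a fresh block via jffs2_reserve_space_gc(),
   rewrite whole pages there and keep the tail in the wbuf, then move the
   affected raw node refs and the size accounting over to the new block. */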
173 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
175 struct jffs2_eraseblock *jeb, *new_jeb;
176 struct jffs2_raw_node_ref **first_raw, **raw;
177 size_t retlen;
178 int ret;
179 unsigned char *buf;
180 uint32_t start, end, ofs, len;
182 spin_lock(&c->erase_completion_lock);
184 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
186 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
188 /* Find the first node to be recovered, by skipping over every
189 node which ends before the wbuf starts, or which is obsolete. */
190 first_raw = &jeb->first_node;
191 while (*first_raw &&
192 (ref_obsolete(*first_raw) ||
193 (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
194 D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
195 ref_offset(*first_raw), ref_flags(*first_raw),
196 (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
197 c->wbuf_ofs));
198 first_raw = &(*first_raw)->next_phys;
201 if (!*first_raw) {
202 /* All nodes were obsolete. Nothing to recover. */
203 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
204 spin_unlock(&c->erase_completion_lock);
205 return;
208 start = ref_offset(*first_raw);
209 end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
211 /* Find the last node to be recovered */
212 raw = first_raw;
213 while ((*raw)) {
214 if (!ref_obsolete(*raw))
215 end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
217 raw = &(*raw)->next_phys;
219 spin_unlock(&c->erase_completion_lock);
221 D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
223 buf = NULL;
224 if (start < c->wbuf_ofs) {
225 /* First affected node was already partially written.
226 * Attempt to reread the old data into our buffer. */
228 buf = kmalloc(end - start, GFP_KERNEL);
229 if (!buf) {
230 printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
232 goto read_failed;
235 /* Do the read... */
236 if (jffs2_cleanmarker_oob(c))
237 ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
238 else
239 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
241 if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
242 /* ECC recovered */
243 ret = 0;
245 if (ret || retlen != c->wbuf_ofs - start) {
246 printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
248 kfree(buf);
249 buf = NULL;
250 read_failed:
251 first_raw = &(*first_raw)->next_phys;
252 /* If this was the only node to be recovered, give up */
253 if (!(*first_raw))
254 return;
256 /* It wasn't. Go on and try to recover nodes complete in the wbuf */
257 start = ref_offset(*first_raw);
258 } else {
259 /* Read succeeded. Copy the remaining data from the wbuf */
260 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
263 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
264 Either 'buf' contains the data, or we find it in the wbuf */
267 /* ... and get an allocation of space from a shiny new block instead */
268 ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE);
269 if (ret) {
270 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
271 kfree(buf);
272 return;
274 if (end-start >= c->wbuf_pagesize) {
275 /* Need to do another write immediately, but it's possible
276 that this is just because the wbuf itself is completely
277 full, and there's nothing earlier read back from the
278 flash. Hence 'buf' isn't necessarily what we're writing
279 from. */
280 unsigned char *rewrite_buf = buf?:c->wbuf;
281 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
283 D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
284 towrite, ofs));
286 #ifdef BREAKMEHEADER
287 static int breakme;
288 if (breakme++ == 20) {
289 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
290 breakme = 0;
291 c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
292 brokenbuf, NULL, c->oobinfo);
293 ret = -EIO;
294 } else
295 #endif
296 if (jffs2_cleanmarker_oob(c))
297 ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
298 rewrite_buf, NULL, c->oobinfo);
299 else
300 ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf);
302 if (ret || retlen != towrite) {
303 /* Argh. We tried. Really we did. */
304 printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
305 kfree(buf);
307 if (retlen) {
308 struct jffs2_raw_node_ref *raw2;
310 raw2 = jffs2_alloc_raw_node_ref();
311 if (!raw2)
312 return;
314 raw2->flash_offset = ofs | REF_OBSOLETE;
316 jffs2_add_physical_node_ref(c, raw2, ref_totlen(c, jeb, *first_raw), NULL);
318 return;
320 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
322 c->wbuf_len = (end - start) - towrite;
323 c->wbuf_ofs = ofs + towrite;
324 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
325 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
326 kfree(buf);
327 } else {
328 /* OK, now we're left with the dregs in whichever buffer we're using */
329 if (buf) {
330 memcpy(c->wbuf, buf, end-start);
331 kfree(buf);
332 } else {
333 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
335 c->wbuf_ofs = ofs;
336 c->wbuf_len = end - start;
339 /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
340 new_jeb = &c->blocks[ofs / c->sector_size];
342 spin_lock(&c->erase_completion_lock);
343 if (new_jeb->first_node) {
344 /* Odd, but possible with ST flash later maybe */
345 new_jeb->last_node->next_phys = *first_raw;
346 } else {
347 new_jeb->first_node = *first_raw;
350 raw = first_raw;
351 while (*raw) {
352 uint32_t rawlen = ref_totlen(c, jeb, *raw);
354 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
355 rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
357 if (ref_obsolete(*raw)) {
358 /* Shouldn't really happen much */
359 new_jeb->dirty_size += rawlen;
360 new_jeb->free_size -= rawlen;
361 c->dirty_size += rawlen;
362 } else {
363 new_jeb->used_size += rawlen;
364 new_jeb->free_size -= rawlen;
365 jeb->dirty_size += rawlen;
366 jeb->used_size -= rawlen;
367 c->dirty_size += rawlen;
369 c->free_size -= rawlen;
370 (*raw)->flash_offset = ofs | ref_flags(*raw);
371 ofs += rawlen;
372 new_jeb->last_node = *raw;
374 raw = &(*raw)->next_phys;
377 /* Fix up the original jeb now it's on the bad_list */
378 *first_raw = NULL;
379 if (first_raw == &jeb->first_node) {
380 jeb->last_node = NULL;
381 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
382 list_del(&jeb->list);
383 list_add(&jeb->list, &c->erase_pending_list);
384 c->nr_erasing_blocks++;
385 jffs2_erase_pending_trigger(c);
387 else
388 jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
390 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
391 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
393 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
394 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
396 spin_unlock(&c->erase_completion_lock);
398 D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
401 /* Meaning of pad argument:
402 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
403 1: Pad, do not adjust nextblock free_size
404 2: Pad, adjust nextblock free_size
406 #define NOPAD 0
407 #define PAD_NOACCOUNT 1
408 #define PAD_ACCOUNTING 2
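/* In this file: jffs2_flash_writev() uses NOPAD when it has filled a whole
   page, and PAD_NOACCOUNT when flushing before moving to a new eraseblock;
   jffs2_flush_wbuf_pad() uses PAD_NOACCOUNT; jffs2_flush_wbuf_gc() uses
   PAD_ACCOUNTING. */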
410 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
412 int ret;
413 size_t retlen;
415 /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
416 del_timer() the timer we never initialised. */
417 if (!jffs2_is_writebuffered(c))
418 return 0;
420 if (!down_trylock(&c->alloc_sem)) {
421 up(&c->alloc_sem);
422 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
423 BUG();
426 if (!c->wbuf_len) /* already checked c->wbuf above */
427 return 0;
429 /* Claim the remaining space on the page.
430 This happens if we have a change to a new block,
431 or if fsync forces us to flush the writebuffer.
432 If we switch to the next page, we will not have
433 enough remaining space for this.
435 if (pad ) {
436 c->wbuf_len = PAD(c->wbuf_len);
438 /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
439 with 8 byte page size */
440 memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
442 if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
443 struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
444 padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
445 padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
446 padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
447 padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
450 /* else jffs2_flash_writev has actually filled in the rest of the
451 buffer for us, and will deal with the node refs etc. later. */
453 #ifdef BREAKME
454 static int breakme;
455 if (breakme++ == 20) {
456 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
457 breakme = 0;
458 c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
459 &retlen, brokenbuf, NULL, c->oobinfo);
460 ret = -EIO;
461 } else
462 #endif
464 if (jffs2_cleanmarker_oob(c))
465 ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
466 else
467 ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
469 if (ret || retlen != c->wbuf_pagesize) {
470 if (ret)
471 printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
472 else {
473 printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
474 retlen, c->wbuf_pagesize);
475 ret = -EIO;
478 jffs2_wbuf_recover(c);
480 return ret;
483 /* Adjust free size of the block if we padded. */
484 if (pad) {
485 struct jffs2_eraseblock *jeb;
486 struct jffs2_raw_node_ref *ref;
487 uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
489 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
491 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
492 (jeb==c->nextblock)?"next":"", jeb->offset));
494 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
495 padded. If there is less free space in the block than that,
496 something screwed up */
497 if (jeb->free_size < waste) {
498 printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
499 c->wbuf_ofs, c->wbuf_len, waste);
500 printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
501 jeb->offset, jeb->free_size);
502 BUG();
504 ref = jffs2_alloc_raw_node_ref();
505 if (!ref)
506 return -ENOMEM;
507 ref->flash_offset = c->wbuf_ofs + c->wbuf_len;
508 ref->flash_offset |= REF_OBSOLETE;
510 spin_lock(&c->erase_completion_lock);
512 jffs2_link_node_ref(c, jeb, ref, waste, NULL);
513 /* FIXME: that made it count as dirty. Convert to wasted */
514 jeb->dirty_size -= waste;
515 c->dirty_size -= waste;
516 jeb->wasted_size += waste;
517 c->wasted_size += waste;
518 } else
519 spin_lock(&c->erase_completion_lock);
521 /* Stick any now-obsoleted blocks on the erase_pending_list */
522 jffs2_refile_wbuf_blocks(c);
523 jffs2_clear_wbuf_ino_list(c);
524 spin_unlock(&c->erase_completion_lock);
526 memset(c->wbuf,0xff,c->wbuf_pagesize);
527 /* adjust write buffer offset, else we get a non contiguous write bug */
528 c->wbuf_ofs += c->wbuf_pagesize;
529 c->wbuf_len = 0;
530 return 0;
533 /* Trigger garbage collection to flush the write-buffer.
534 If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
535 outstanding. If ino arg non-zero, do it only if a write for the
536 given inode is outstanding. */
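/* The flush works by running garbage-collection passes until the wbuf has
   moved on (wbuf_ofs changed) or is empty. If the filesystem still has
   unchecked nodes, or a GC pass fails, we fall back to padding the buffer
   out with PAD_ACCOUNTING instead. */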
537 int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
539 uint32_t old_wbuf_ofs;
540 uint32_t old_wbuf_len;
541 int ret = 0;
543 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
545 if (!c->wbuf)
546 return 0;
548 down(&c->alloc_sem);
549 if (!jffs2_wbuf_pending_for_ino(c, ino)) {
550 D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
551 up(&c->alloc_sem);
552 return 0;
555 old_wbuf_ofs = c->wbuf_ofs;
556 old_wbuf_len = c->wbuf_len;
558 if (c->unchecked_size) {
559 /* GC won't make any progress for a while */
560 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
561 down_write(&c->wbuf_sem);
562 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
563 /* retry flushing wbuf in case jffs2_wbuf_recover
564 left some data in the wbuf */
565 if (ret)
566 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
567 up_write(&c->wbuf_sem);
568 } else while (old_wbuf_len &&
569 old_wbuf_ofs == c->wbuf_ofs) {
571 up(&c->alloc_sem);
573 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
575 ret = jffs2_garbage_collect_pass(c);
576 if (ret) {
577 /* GC failed. Flush it with padding instead */
578 down(&c->alloc_sem);
579 down_write(&c->wbuf_sem);
580 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
581 /* retry flushing wbuf in case jffs2_wbuf_recover
582 left some data in the wbuf */
583 if (ret)
584 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
585 up_write(&c->wbuf_sem);
586 break;
588 down(&c->alloc_sem);
591 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
593 up(&c->alloc_sem);
594 return ret;
597 /* Pad write-buffer to end and write it, wasting space. */
598 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
600 int ret;
602 if (!c->wbuf)
603 return 0;
605 down_write(&c->wbuf_sem);
606 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
607 /* retry - maybe wbuf recover left some data in wbuf. */
608 if (ret)
609 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
610 up_write(&c->wbuf_sem);
612 return ret;
614 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
616 struct kvec outvecs[3];
617 uint32_t totlen = 0;
618 uint32_t split_ofs = 0;
619 uint32_t old_totlen;
620 int ret, splitvec = -1;
621 int invec, outvec;
622 size_t wbuf_retlen;
623 unsigned char *wbuf_ptr;
624 size_t donelen = 0;
625 uint32_t outvec_to = to;
627 /* If not NAND flash, don't bother */
628 if (!jffs2_is_writebuffered(c))
629 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
631 down_write(&c->wbuf_sem);
633 /* If wbuf_ofs is not initialized, set it to target address */
634 if (c->wbuf_ofs == 0xFFFFFFFF) {
635 c->wbuf_ofs = PAGE_DIV(to);
636 c->wbuf_len = PAGE_MOD(to);
637 memset(c->wbuf,0xff,c->wbuf_pagesize);
640 /* Fixup the wbuf if we are moving to a new eraseblock. The checks below
641 fail for ECC'd NOR because cleanmarker == 16, so a block starts at
642 xxx0010. */
643 if (jffs2_nor_ecc(c)) {
644 if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
645 c->wbuf_ofs = PAGE_DIV(to);
646 c->wbuf_len = PAGE_MOD(to);
647 memset(c->wbuf,0xff,c->wbuf_pagesize);
651 /* Sanity checks on target address.
652 It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
653 and it's permitted to write at the beginning of a new
654 erase block. Anything else, and you die.
655 New block starts at xxx000c (0-b = block header)
657 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
658 /* It's a write to a new block */
659 if (c->wbuf_len) {
660 D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
661 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
662 if (ret) {
663 /* the underlying layer has to check wbuf_len to do the cleanup */
664 D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
665 *retlen = 0;
666 goto exit;
669 /* set pointer to new block */
670 c->wbuf_ofs = PAGE_DIV(to);
671 c->wbuf_len = PAGE_MOD(to);
674 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
675 /* We're not writing immediately after the writebuffer. Bad. */
676 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
677 if (c->wbuf_len)
678 printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
679 c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
680 BUG();
683 /* Note outvecs[3] above. We know count is never greater than 2 */
684 if (count > 2) {
685 printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
686 BUG();
689 invec = 0;
690 outvec = 0;
692 /* Fill writebuffer first, if already in use */
693 if (c->wbuf_len) {
694 uint32_t invec_ofs = 0;
696 /* adjust alignment offset */
697 if (c->wbuf_len != PAGE_MOD(to)) {
698 c->wbuf_len = PAGE_MOD(to);
699 /* take care of alignment to next page */
700 if (!c->wbuf_len)
701 c->wbuf_len = c->wbuf_pagesize;
704 while(c->wbuf_len < c->wbuf_pagesize) {
705 uint32_t thislen;
707 if (invec == count)
708 goto alldone;
710 thislen = c->wbuf_pagesize - c->wbuf_len;
712 if (thislen >= invecs[invec].iov_len)
713 thislen = invecs[invec].iov_len;
715 invec_ofs = thislen;
717 memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
718 c->wbuf_len += thislen;
719 donelen += thislen;
720 /* Get next invec, if the current one did not fill the buffer */
721 if (c->wbuf_len < c->wbuf_pagesize)
722 invec++;
725 /* write buffer is full, flush buffer */
726 ret = __jffs2_flush_wbuf(c, NOPAD);
727 if (ret) {
728 /* the underlying layer has to check wbuf_len to do the cleanup */
729 D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
730 /* Retlen zero to make sure our caller doesn't mark the space dirty.
731 We've already done everything that's necessary */
732 *retlen = 0;
733 goto exit;
735 outvec_to += donelen;
736 c->wbuf_ofs = outvec_to;
738 /* All invecs done ? */
739 if (invec == count)
740 goto alldone;
742 /* Set up the first outvec, containing the remainder of the
743 invec we partially used */
744 if (invecs[invec].iov_len > invec_ofs) {
745 outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
746 totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
747 if (totlen > c->wbuf_pagesize) {
748 splitvec = outvec;
749 split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
751 outvec++;
753 invec++;
756 /* OK, now we've flushed the wbuf and the start of the bits
757 we have been asked to write, now to write the rest.... */
759 /* totlen holds the amount of data still to be written */
760 old_totlen = totlen;
761 for ( ; invec < count; invec++,outvec++ ) {
762 outvecs[outvec].iov_base = invecs[invec].iov_base;
763 totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
764 if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
765 splitvec = outvec;
766 split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
767 old_totlen = totlen;
771 /* Now the outvecs array holds all the remaining data to write */
772 /* Up to splitvec,split_ofs is to be written immediately. The rest
773 goes into the (now-empty) wbuf */
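/* Example (hypothetical 512-byte wbuf_pagesize, wbuf already empty and the
   target page-aligned): writing outvecs of 400 and 300 bytes gives totlen
   700; PAGE_DIV(700) = 512 bytes are written directly (all of vec 0 plus
   112 bytes of vec 1, so splitvec = 1, split_ofs = 112), and the remaining
   188 bytes are copied into the now-empty wbuf. */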
775 if (splitvec != -1) {
776 uint32_t remainder;
778 remainder = outvecs[splitvec].iov_len - split_ofs;
779 outvecs[splitvec].iov_len = split_ofs;
781 /* We did cross a page boundary, so we write some now */
782 if (jffs2_cleanmarker_oob(c))
783 ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
784 else
785 ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
787 if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
788 /* At this point we have no problem,
789 c->wbuf is empty. However, refile nextblock to avoid
790 writing again to the same address.
792 struct jffs2_eraseblock *jeb;
794 spin_lock(&c->erase_completion_lock);
796 jeb = &c->blocks[outvec_to / c->sector_size];
797 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
799 *retlen = 0;
800 spin_unlock(&c->erase_completion_lock);
801 goto exit;
804 donelen += wbuf_retlen;
805 c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
807 if (remainder) {
808 outvecs[splitvec].iov_base += split_ofs;
809 outvecs[splitvec].iov_len = remainder;
810 } else {
811 splitvec++;
814 } else {
815 splitvec = 0;
818 /* Now splitvec points to the start of the bits we have to copy
819 into the wbuf */
820 wbuf_ptr = c->wbuf;
822 for ( ; splitvec < outvec; splitvec++) {
823 /* Don't copy the wbuf into itself */
824 if (outvecs[splitvec].iov_base == c->wbuf)
825 continue;
826 memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
827 wbuf_ptr += outvecs[splitvec].iov_len;
828 donelen += outvecs[splitvec].iov_len;
830 c->wbuf_len = wbuf_ptr - c->wbuf;
832 /* If there's a remainder in the wbuf and it's a non-GC write,
833 remember that the wbuf affects this ino */
834 alldone:
835 *retlen = donelen;
837 if (jffs2_sum_active()) {
838 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
839 if (res)
840 return res;
843 if (c->wbuf_len && ino)
844 jffs2_wbuf_dirties_inode(c, ino);
846 ret = 0;
848 exit:
849 up_write(&c->wbuf_sem);
850 return ret;
854 * This is the entry point for flash writes.
855 * Check if we work on NAND flash; if so, build a kvec and write it via writev
857 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
859 struct kvec vecs[1];
861 if (!jffs2_is_writebuffered(c))
862 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
864 vecs[0].iov_base = (unsigned char *) buf;
865 vecs[0].iov_len = len;
866 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
870 Handle readback from writebuffer and ECC failure return
872 int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
874 loff_t orbf = 0, owbf = 0, lwbf = 0;
875 int ret;
877 if (!jffs2_is_writebuffered(c))
878 return c->mtd->read(c->mtd, ofs, len, retlen, buf);
880 /* Read flash */
881 down_read(&c->wbuf_sem);
882 if (jffs2_cleanmarker_oob(c))
883 ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
884 else
885 ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
887 if ( (ret == -EBADMSG) && (*retlen == len) ) {
888 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
889 len, ofs);
891 * We have the raw data without ECC correction in the buffer; maybe
892 * we are lucky and all or part of the data is correct. We check the node.
893 * If the data are corrupted, the node check will sort it out.
894 * We keep this block; it will fail on write or erase and then we
895 * mark it bad. Or should we do that now? But we should give it a chance.
896 * Maybe we had a system crash or power loss before the ECC write or
897 * an erase was completed.
898 * So we return success. :)
900 ret = 0;
903 /* if no writebuffer available or write buffer empty, return */
904 if (!c->wbuf_pagesize || !c->wbuf_len)
905 goto exit;
907 /* if we read in a different block, return */
908 if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
909 goto exit;
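/* The read range overlaps the current eraseblock, so any bytes that also
   fall inside [wbuf_ofs, wbuf_ofs + wbuf_len) must be taken from the wbuf
   rather than from flash: owbf/orbf locate the overlap in the wbuf and in
   the caller's buffer, and lwbf is the number of bytes to copy. */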
911 if (ofs >= c->wbuf_ofs) {
912 owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */
913 if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
914 goto exit;
915 lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
916 if (lwbf > len)
917 lwbf = len;
918 } else {
919 orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
920 if (orbf > len) /* does the read end before the write buffer ? */
921 goto exit;
922 lwbf = len - orbf; /* number of bytes to copy */
923 if (lwbf > c->wbuf_len)
924 lwbf = c->wbuf_len;
926 if (lwbf > 0)
927 memcpy(buf+orbf,c->wbuf+owbf,lwbf);
929 exit:
930 up_read(&c->wbuf_sem);
931 return ret;
935 * Check, if the out of band area is empty
937 int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
939 unsigned char *buf;
940 int ret = 0;
941 int i,len,page;
942 size_t retlen;
943 int oob_size;
945 /* allocate a buffer for all oob data in this sector */
946 oob_size = c->mtd->oobsize;
947 len = 4 * oob_size;
948 buf = kmalloc(len, GFP_KERNEL);
949 if (!buf) {
950 printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
951 return -ENOMEM;
954 * if mode = 0, we scan for a totally empty oob area; otherwise we have
955 * to take care of the cleanmarker in the first page of the block
957 ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
958 if (ret) {
959 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
960 goto out;
963 if (retlen < len) {
964 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
965 "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
966 ret = -EIO;
967 goto out;
970 /* Special check for first page */
971 for(i = 0; i < oob_size ; i++) {
972 /* Yeah, we know about the cleanmarker. */
973 if (mode && i >= c->fsdata_pos &&
974 i < c->fsdata_pos + c->fsdata_len)
975 continue;
977 if (buf[i] != 0xFF) {
978 D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
979 buf[i], i, jeb->offset));
980 ret = 1;
981 goto out;
985 /* we know, we are aligned :) */
986 for (page = oob_size; page < len; page += sizeof(long)) {
987 unsigned long dat = *(unsigned long *)(&buf[page]);
988 if(dat != -1) {
989 ret = 1;
990 goto out;
994 out:
995 kfree(buf);
997 return ret;
1001 * Scan for a valid cleanmarker and for bad blocks
1002 * For virtual blocks (concatenated physical blocks) check the cleanmarker
1003 * only in the first page of the first physical block, but scan for bad blocks in all
1004 * physical blocks
1006 int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1008 struct jffs2_unknown_node n;
1009 unsigned char buf[2 * NAND_MAX_OOBSIZE];
1010 unsigned char *p;
1011 int ret, i, cnt, retval = 0;
1012 size_t retlen, offset;
1013 int oob_size;
1015 offset = jeb->offset;
1016 oob_size = c->mtd->oobsize;
1018 /* Loop through the physical blocks */
1019 for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
1020 /* Check first if the block is bad. */
1021 if (c->mtd->block_isbad (c->mtd, offset)) {
1022 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
1023 return 2;
1026 * We read oob data from page 0 and 1 of the block.
1027 * page 0 contains cleanmarker and badblock info
1028 * page 1 contains failure count of this block
1030 ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
1032 if (ret) {
1033 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
1034 return ret;
1036 if (retlen < (oob_size << 1)) {
1037 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
1038 return -EIO;
1041 /* Check cleanmarker only on the first physical block */
1042 if (!cnt) {
1043 n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
1044 n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
1045 n.totlen = cpu_to_je32 (8);
1046 p = (unsigned char *) &n;
1048 for (i = 0; i < c->fsdata_len; i++) {
1049 if (buf[c->fsdata_pos + i] != p[i]) {
1050 retval = 1;
1053 D1(if (retval == 1) {
1054 printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
1055 printk(KERN_WARNING "OOB at %08x was ", offset);
1056 for (i=0; i < oob_size; i++) {
1057 printk("%02x ", buf[i]);
1059 printk("\n");
1062 offset += c->mtd->erasesize;
1064 return retval;
1067 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1069 struct jffs2_unknown_node n;
1070 int ret;
1071 size_t retlen;
1073 n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
1074 n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
1075 n.totlen = cpu_to_je32(8);
1077 ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
1079 if (ret) {
1080 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1081 return ret;
1083 if (retlen != c->fsdata_len) {
1084 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
1085 return ret;
1087 return 0;
1091 * On NAND we try to mark this block bad. If erasing it has failed
1092 * MAX_ERASE_FAILURES times, we finally mark it bad.
1093 * Don't care about failures here. This block remains on the erase-pending
1094 * or badblock list as long as nobody manipulates the flash with
1095 * a bootloader or something like that.
1098 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1100 int ret;
1102 /* if the count is < max, we try to write the counter to the 2nd page oob area */
1103 if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1104 return 0;
1106 if (!c->mtd->block_markbad)
1107 return 1; // What else can we do?
1109 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
1110 ret = c->mtd->block_markbad(c->mtd, bad_offset);
1112 if (ret) {
1113 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1114 return ret;
1116 return 1;
1119 #define NAND_JFFS2_OOB16_FSDALEN 8
1121 static struct nand_oobinfo jffs2_oobinfo_docecc = {
1122 .useecc = MTD_NANDECC_PLACE,
1123 .eccbytes = 6,
1124 .eccpos = {0,1,2,3,4,5}
1128 static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
1130 struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
1132 /* Do this only, if we have an oob buffer */
1133 if (!c->mtd->oobsize)
1134 return 0;
1136 /* Cleanmarker is out-of-band, so inline size zero */
1137 c->cleanmarker_size = 0;
1139 /* Should we use autoplacement ? */
1140 if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
1141 D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
1142 /* Get the position of the free bytes */
1143 if (!oinfo->oobfree[0][1]) {
1144 printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
1145 return -ENOSPC;
1147 c->fsdata_pos = oinfo->oobfree[0][0];
1148 c->fsdata_len = oinfo->oobfree[0][1];
1149 if (c->fsdata_len > 8)
1150 c->fsdata_len = 8;
1151 } else {
1152 /* This is just a legacy fallback and should go away soon */
1153 switch(c->mtd->ecctype) {
1154 case MTD_ECC_RS_DiskOnChip:
1155 printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
1156 c->oobinfo = &jffs2_oobinfo_docecc;
1157 c->fsdata_pos = 6;
1158 c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
1159 c->badblock_pos = 15;
1160 break;
1162 default:
1163 D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
1164 return -EINVAL;
1167 return 0;
1170 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1172 int res;
1174 /* Initialise write buffer */
1175 init_rwsem(&c->wbuf_sem);
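/* In the MTD API of this vintage, mtd->oobblock is the NAND page size, so
   the write-buffer covers exactly one NAND page. */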
1176 c->wbuf_pagesize = c->mtd->oobblock;
1177 c->wbuf_ofs = 0xFFFFFFFF;
1179 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1180 if (!c->wbuf)
1181 return -ENOMEM;
1183 res = jffs2_nand_set_oobinfo(c);
1185 #ifdef BREAKME
1186 if (!brokenbuf)
1187 brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1188 if (!brokenbuf) {
1189 kfree(c->wbuf);
1190 return -ENOMEM;
1192 memset(brokenbuf, 0xdb, c->wbuf_pagesize);
1193 #endif
1194 return res;
1197 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1199 kfree(c->wbuf);
1202 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1203 c->cleanmarker_size = 0; /* No cleanmarkers needed */
1205 /* Initialize write buffer */
1206 init_rwsem(&c->wbuf_sem);
1209 c->wbuf_pagesize = c->mtd->erasesize;
1211 /* Find a suitable c->sector_size
1212 * - Not too many sectors
1213 * - Sectors have to be at least 4 KiB plus some bytes
1214 * - All known dataflashes have erase sizes of 528 or 1056 bytes
1215 * - We take at least 8 eraseblocks and want at least 8 KiB per sector
1216 * - The concatenation should be a power-of-2 number of eraseblocks
1219 c->sector_size = 8 * c->mtd->erasesize;
1221 while (c->sector_size < 8192) {
1222 c->sector_size *= 2;
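/* For the two erase sizes mentioned above: 528 bytes gives 8 * 528 = 4224,
   doubled once to 16 * 528 = 8448; 1056 bytes gives 8 * 1056 = 8448 straight
   away. Either way the sector is a power-of-2 number of eraseblocks and is
   at least 8 KiB. */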
1225 /* It may be necessary to adjust the flash size */
1226 c->flash_size = c->mtd->size;
1228 if ((c->flash_size % c->sector_size) != 0) {
1229 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1230 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
1233 c->wbuf_ofs = 0xFFFFFFFF;
1234 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1235 if (!c->wbuf)
1236 return -ENOMEM;
1238 printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1240 return 0;
1243 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1244 kfree(c->wbuf);
1247 int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
1248 /* Cleanmarker is actually larger on the flashes */
1249 c->cleanmarker_size = 16;
1251 /* Initialize write buffer */
1252 init_rwsem(&c->wbuf_sem);
1253 c->wbuf_pagesize = c->mtd->eccsize;
1254 c->wbuf_ofs = 0xFFFFFFFF;
1256 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1257 if (!c->wbuf)
1258 return -ENOMEM;
1260 return 0;
1263 void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
1264 kfree(c->wbuf);
1267 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1268 /* Cleanmarker currently occupies a whole programming region */
1269 c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd);
1271 /* Initialize write buffer */
1272 init_rwsem(&c->wbuf_sem);
1273 c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd);
1274 c->wbuf_ofs = 0xFFFFFFFF;
1276 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1277 if (!c->wbuf)
1278 return -ENOMEM;
1280 return 0;
1283 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1284 kfree(c->wbuf);