/* linux-2.6/kvm.git: fs/reiserfs/journal.c */
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                      -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/
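/*
 * Typical caller pattern (illustrative sketch, not from this file; real
 * callers live elsewhere in fs/reiserfs/ and in do_journal_release below):
 *
 *   struct reiserfs_transaction_handle th;
 *   int err;
 *
 *   err = journal_begin(&th, sb, nblocks);       (reserve log space)
 *   if (err)
 *           return err;
 *   reiserfs_prepare_for_journal(sb, bh, 1);     (prepare bh for logging)
 *   ...modify bh...
 *   journal_mark_dirty(&th, sb, bh);             (log the buffer)
 *   err = journal_end(&th, sb, nblocks);         (batch or commit)
 *
 * nblocks is the worst-case number of blocks the caller expects to log.
 */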
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/time.h>
#include <asm/semaphore.h>

#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
				   structs at 4k */
#define BUFNR 64		/* read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2		/* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1		/* flush commit and real blocks */
#define COMMIT_NOW  2		/* end and commit this transaction */
#define WAIT        4		/* wait for the log blocks to hit the disk */
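/*
 * Illustrative note (not from the original source): do_journal_end takes a
 * bitwise OR of the flags above.  A caller that wants the transaction ended,
 * committed, and safely on disk before returning would pass a combination
 * such as COMMIT_NOW | WAIT, while the unmount path below passes FLUSH_ALL
 * to push both the commit blocks and the real blocks.
 */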
static int do_journal_end(struct reiserfs_transaction_handle *,
			  struct super_block *, unsigned long nblocks,
			  int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
			       struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *p_s_sb,
			      unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}

static void disable_barrier(struct super_block *s)
{
	REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
	printk("reiserfs: disabling flush barriers on %s\n",
	       reiserfs_bdevname(s));
}

static struct reiserfs_bitmap_node *
allocate_bitmap_node(struct super_block *p_s_sb)
{
	struct reiserfs_bitmap_node *bn;
	static int id;

	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
	if (!bn) {
		return NULL;
	}
	bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS);
	if (!bn->data) {
		kfree(bn);
		return NULL;
	}
	bn->id = id++;
	INIT_LIST_HEAD(&bn->list);
	return bn;
}

static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
      repeat:
	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, p_s_sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	}
	bn = allocate_bitmap_node(p_s_sb);
	if (!bn) {
		yield();
		goto repeat;
	}
	return bn;
}

static inline void free_bitmap_node(struct super_block *p_s_sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		kfree(bn->data);
		kfree(bn);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(p_s_sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			break;	/* this is ok, we'll try again when more are needed */
		}
	}
}

static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
				  struct reiserfs_list_bitmap *jb)
{
	int bmap_nr = block / (p_s_sb->s_blocksize << 3);
	int bit_nr = block % (p_s_sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}
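/*
 * Worked example (illustrative, not from the original source): with a 4096
 * byte block size, each bitmap node covers 4096 << 3 = 32768 blocks.  For
 * block 70000, set_bit_in_list_bitmap computes bmap_nr = 70000 / 32768 = 2
 * and bit_nr = 70000 % 32768 = 4464, so bit 4464 is set in the node covering
 * the third bitmap group, allocating that node on first use.
 */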
static void cleanup_bitmap_list(struct super_block *p_s_sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < SB_BMAP_NR(p_s_sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(p_s_sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(p_s_sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}

static int free_bitmap_nodes(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
				   struct reiserfs_list_bitmap *jb_array,
				   int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vmalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(p_s_sb,
					 "clm-2000, unable to allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
		memset(jb->bitmaps, 0, mem);
	}
	if (failed) {
		free_list_bitmaps(p_s_sb, jb_array);
		return -1;
	}
	return 0;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *
get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(p_s_sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	if (jb->journal_list) {	/* double check to make sure it flushed correctly */
		return NULL;
	}
	jb->journal_list = jl;
	return jb;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* if last one, overwrite it after the if */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}
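/*
 * Illustrative sketch (not from the original source): for num_cnodes = 4,
 * allocate_cnodes returns one vmalloc'd array wired up as a doubly linked
 * free list:
 *
 *   NULL <- head[0] <-> head[1] <-> head[2] <-> head[3] -> NULL
 *
 * get_cnode below pops from the front of this list and free_cnode pushes
 * back onto it, so no further allocation happens on the journal fast path.
 */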
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}

static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
#ifdef CONFIG_SMP
	if (current->lock_depth < 0) {
		reiserfs_panic(sb, "%s called without kernel lock held",
			       caller);
	}
#else
	;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *
get_journal_hash_dev(struct super_block *sb,
		     struct reiserfs_journal_cnode **table, long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return (struct reiserfs_journal_cnode *)0;
}

/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
			int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t *next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(p_s_sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
	 ** if we crash before the transaction that freed it commits, this transaction won't
	 ** have committed either, and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       p_s_sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}
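/*
 * Caller-side sketch (illustrative, not from the original source): the block
 * allocator's find_forward-style scan is expected to use the hint roughly
 * like this:
 *
 *   b_blocknr_t next;
 *   if (reiserfs_in_journal(s, bmap_nr, bit_nr, 1, &next))
 *           bit_nr = next;   (rejected: resume the scan at the hint)
 *
 * As the comment above warns, the hint may itself be rejected on the next
 * call, so callers must keep looping.
 */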
/* insert cn into table
*/
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}

/* lock the current transaction */
static inline void lock_journal(struct super_block *p_s_sb)
{
	PROC_INFO_INC(p_s_sb, journal.lock_journal);
	down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *p_s_sb)
{
	up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "trans id %lu, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
					   struct reiserfs_journal_list *jl)
{
	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(p_s_sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}

static int journal_list_still_alive(struct super_block *s,
				    unsigned long trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL,
				 "clm-2084: pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static int submit_barrier_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	return submit_bh(WRITE_BARRIER, bh);
}

static void check_barrier_completion(struct super_block *s,
				     struct buffer_head *bh)
{
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		disable_barrier(s);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}
}
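/*
 * Summary of the barrier fallback protocol used below (illustrative comment,
 * not from the original source).  Both flush_commit_list and
 * _update_journal_header_block follow the same pattern:
 *
 *   lock_buffer(bh);
 *   ret = submit_barrier_buffer(bh);
 *   if (ret == -EOPNOTSUPP) {
 *           set_buffer_uptodate(bh);     (device refused the barrier)
 *           disable_barrier(s);          (fall back to plain sync writes)
 *   }
 *   wait_on_buffer(bh);
 *   check_barrier_completion(s, bh);     (catch async -EOPNOTSUPP too)
 *
 * check_barrier_completion handles devices that only report EOPNOTSUPP at
 * I/O completion time, rewriting the buffer synchronously in that case.
 */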
#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t *lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	BUG_ON(chunk->nr >= CHUNK_SIZE);
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock)
			spin_unlock(lock);
		fn(chunk);
		if (lock)
			spin_lock(lock);
	}
	return ret;
}
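/*
 * Usage sketch (illustrative, not from the original source): callers batch
 * buffers into a chunk and then drain whatever is left over, e.g.
 *
 *   struct buffer_chunk chunk;
 *   chunk.nr = 0;
 *   ...
 *   add_to_chunk(&chunk, bh, NULL, write_chunk);  (flushes itself when full)
 *   ...
 *   if (chunk.nr)
 *           write_chunk(&chunk);                  (drain the partial chunk)
 *
 * Passing a spinlock lets add_to_chunk drop it around the submission, since
 * submitting I/O may block.  write_one_transaction below passes NULL because
 * it holds no spinlock; write_ordered_buffers passes j_dirty_buffers_lock.
 */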
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
	      no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/* buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		BUG_ON(bh->b_private);
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t *lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (test_set_buffer_locked(bh)) {
			if (!buffer_dirty(bh)) {
				list_move(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		/* in theory, dirty non-uptodate buffers should never get here,
		 * but the upper layer io error paths still have a few quirks.
		 * Handle them here as gracefully as we can
		 */
		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
			clear_buffer_dirty(bh);
			ret = -EIO;
		}
		if (buffer_dirty(bh)) {
			list_move(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
	      loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);

		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		/* ugly interaction with invalidatepage here.
		 * reiserfs_invalidate_page will pin any buffer that has a valid
		 * journal head from an older transaction.  If someone else sets
		 * our buffer dirty after we write it in the first loop, and
		 * then someone truncates the page away, nobody will ever write
		 * the buffer.  We're safe if we write the page one last time
		 * after freeing the journal header.
		 */
		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
			spin_unlock(lock);
			ll_rw_block(WRITE, 1, &bh);
			spin_lock(lock);
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}
static int flush_older_commits(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal_list *first_jl;
	struct list_head *entry;
	unsigned long trans_id = jl->j_trans_id;
	unsigned long other_trans_id;
	unsigned long first_trans_id;

      find_first:
	/*
	 * first we walk backwards to find the oldest uncommitted transaction
	 */
	first_jl = jl;
	entry = jl->j_list.prev;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		if (entry == &journal->j_journal_list ||
		    atomic_read(&other_jl->j_older_commits_done))
			break;

		first_jl = other_jl;
		entry = other_jl->j_list.prev;
	}

	/* if we didn't find any older uncommitted transactions, return now */
	if (first_jl == jl) {
		return 0;
	}

	first_trans_id = first_jl->j_trans_id;

	entry = &first_jl->j_list;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		other_trans_id = other_jl->j_trans_id;

		if (other_trans_id < trans_id) {
			if (atomic_read(&other_jl->j_commit_left) != 0) {
				flush_commit_list(s, other_jl, 0);

				/* list we were called with is gone, return */
				if (!journal_list_still_alive(s, trans_id))
					return 1;

				/* the one we just flushed is gone, this means all
				 * older lists are also gone, so first_jl is no longer
				 * valid either.  Go back to the beginning.
				 */
				if (!journal_list_still_alive
				    (s, other_trans_id)) {
					goto find_first;
				}
			}
			entry = entry->next;
			if (entry == &journal->j_journal_list)
				return 0;
		} else {
			return 0;
		}
	}
	return 0;
}

int reiserfs_async_progress_wait(struct super_block *s)
{
	DEFINE_WAIT(wait);
	struct reiserfs_journal *j = SB_JOURNAL(s);
	if (atomic_read(&j->j_async_throttle))
		blk_congestion_wait(WRITE, HZ / 10);
	return 0;
}
/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall)
{
	int i;
	int bn;
	struct buffer_head *tbh = NULL;
	unsigned long trans_id = jl->j_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int barrier = 0;
	int retval = 0;
	int write_len;

	reiserfs_check_lock_depth(s, "flush_commit_list");

	if (atomic_read(&jl->j_older_commits_done)) {
		return 0;
	}

	get_fs_excl();

	/* before we can put our commit blocks on disk, we have to make sure everyone older than
	 ** us is on disk too
	 */
	BUG_ON(jl->j_len <= 0);
	BUG_ON(trans_id == journal->j_trans_id);

	get_journal_list(jl);
	if (flushall) {
		if (flush_older_commits(s, jl) == 1) {
			/* list disappeared during flush_older_commits.  return */
			goto put_jl;
		}
	}

	/* make sure nobody is trying to flush this one at the same time */
	down(&jl->j_commit_lock);
	if (!journal_list_still_alive(s, trans_id)) {
		up(&jl->j_commit_lock);
		goto put_jl;
	}
	BUG_ON(jl->j_trans_id == 0);

	/* this commit is done, exit */
	if (atomic_read(&(jl->j_commit_left)) <= 0) {
		if (flushall) {
			atomic_set(&(jl->j_older_commits_done), 1);
		}
		up(&jl->j_commit_lock);
		goto put_jl;
	}

	if (!list_empty(&jl->j_bh_list)) {
		int ret;
		unlock_kernel();
		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
					    journal, jl, &jl->j_bh_list);
		if (ret < 0 && retval == 0)
			retval = ret;
		lock_kernel();
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk.  Try to write at least 256
	 * log blocks.  later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0; i < write_len; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh))
				ll_rw_block(WRITE, 1, &tbh);
			put_bh(tbh);
		}
	}
	atomic_dec(&journal->j_async_throttle);

	/* We're skipping the commit if there's an error */
	if (retval || reiserfs_is_journal_aborted(journal))
		barrier = 0;

	/* wait on everything written so far before writing the commit
	 * if we are in barrier mode, send the commit down now
	 */
	barrier = reiserfs_barrier_flush(s);
	if (barrier) {
		int ret;
		lock_buffer(jl->j_commit_bh);
		ret = submit_barrier_buffer(jl->j_commit_bh);
		if (ret == -EOPNOTSUPP) {
			set_buffer_uptodate(jl->j_commit_bh);
			disable_barrier(s);
			barrier = 0;
		}
	}
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		wait_on_buffer(tbh);
		/* since we're using ll_rw_blk above, it might have skipped over
		 * a locked buffer.  Double check here
		 */
		if (buffer_dirty(tbh))	/* redundant, sync_dirty_buffer() checks */
			sync_dirty_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601, buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	if (!barrier) {
		/* If there was a write error in the journal - we can't commit
		 * this transaction - it will be invalid and, if successful,
		 * will just end up propagating the write error out to
		 * the file system. */
		if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
			if (buffer_dirty(jl->j_commit_bh))
				BUG();
			mark_buffer_dirty(jl->j_commit_bh);
			sync_dirty_buffer(jl->j_commit_bh);
		}
	} else
		wait_on_buffer(jl->j_commit_bh);

	check_barrier_completion(s, jl->j_commit_bh);

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615: buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	up(&jl->j_commit_lock);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __FUNCTION__);
	put_fs_excl();
	return retval;
}
/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *
find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}

static int newer_jl_done(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
		    atomic_read(&cn->jlist->j_commit_left) != 0)
			return 0;
		cn = cn->hprev;
	}
	return 1;
}

static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);

/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	 ** to lock for each call to remove_journal_hash?
	 */
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(p_s_sb,
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ? 1 : 0,
						 cn->state);
			}
			cn->state = 0;
			remove_journal_hash(p_s_sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(p_s_sb, last);
	}
	jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb,
					unsigned long offset,
					unsigned long trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			wait_on_buffer((journal->j_header_bh));
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(p_s_sb,
						 "journal-699: buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		if (reiserfs_barrier_flush(p_s_sb)) {
			int ret;
			lock_buffer(journal->j_header_bh);
			ret = submit_barrier_buffer(journal->j_header_bh);
			if (ret == -EOPNOTSUPP) {
				set_buffer_uptodate(journal->j_header_bh);
				disable_barrier(p_s_sb);
				goto sync;
			}
			wait_on_buffer(journal->j_header_bh);
			check_barrier_completion(p_s_sb, journal->j_header_bh);
		} else {
		      sync:
			set_buffer_dirty(journal->j_header_bh);
			sync_dirty_buffer(journal->j_header_bh);
		}
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(p_s_sb,
					 "journal-837: IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}
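/*
 * Illustrative note (not from the original source): the three header fields
 * written above fully describe the replay window.  After flushing a list that
 * occupied log blocks [j_start, j_start + j_len + 1], flush_journal_list
 * advances the header past the descriptor, the logged blocks, and the commit
 * block:
 *
 *   offset = (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s);
 *
 * so a crash after this write replays nothing from the flushed transaction.
 */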
static int update_journal_header_block(struct super_block *p_s_sb,
				       unsigned long offset,
				       unsigned long trans_id)
{
	return _update_journal_header_block(p_s_sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	unsigned long trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* Did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(p_s_sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}

static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}
/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s,
				 "clm-2048: flush_journal_list called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		down(&journal->j_flush_sem);
	} else if (!down_trylock(&journal->j_flush_sem)) {
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s,
			       "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	get_fs_excl();

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	 ** the commit lists of any older transactions
	 */
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s,
			       "journal-844: panic journal list is flushing, wcount is not 0\n");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit.  Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		 ** don't clear BH_JDirty_wait if we aren't the one writing this
		 ** block to disk
		 */
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			 ** we are working with it
			 */
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		 ** sure they are committed, and don't try writing it to disk
		 */
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		 ** locked while it works, so we should never see a buffer here that
		 ** is not marked JDirty_wait
		 */
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s,
					 "journal-813: BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer transaction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s,
					 "clm-2082: Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __FUNCTION__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s,
						 "journal-945: saved_bh->b_count < 0");
			}
		}
	}
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1011: cn->bh is NULL\n");
				}
				wait_on_buffer(cn->bh);
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1012: cn->bh is NULL\n");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s,
							 "journal-949: buffer write failed\n");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				 ** check, otherwise we race against our flushpage routine
				 */
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* undo the inc from journal_mark_dirty */
				put_bh(cn->bh);
				brelse(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __FUNCTION__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk.  This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
	 */
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure there
	 ** are no allocations going on while we are flushing journal lists.  So,
	 ** we only need to update the journal header block for the last list
	 ** being flushed
	 */
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __FUNCTION__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		up(&journal->j_flush_sem);
	put_fs_excl();
	return err;
}
static int test_transaction(struct super_block *s,
			    struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;

	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
		return 1;

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && !newer_jl_done(cn))
			return 0;
	      next:
		cn = cn->next;
		cond_resched();
	}
	return 0;
}

static int write_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	int ret = 0;

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
	      next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		 ** buffer.  Only the most recent transaction with a buffer in
		 ** it is allowed to send that buffer to disk
		 */
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}

static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned long *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned long orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	down(&journal->j_flush_sem);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_sem held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left)
		    || !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	if (chunk.nr) {
		write_chunk(&chunk);
	}

      done:
	up(&journal->j_flush_sem);
	return ret;
}
/* for o_sync and fsync heavy applications, they tend to use
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned long trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	/* try to find a group of blocks we can flush across all the
	 ** transactions, but only bother if we've actually spanned
	 ** across multiple lists
	 */
	if (flush_jl != jl) {
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	return 0;
}
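/*
 * Worked example (illustrative numbers, not from the original source): with
 * data logging enabled (limit = 1024), ten queued lists each holding roughly
 * 150 dirty real blocks would be scanned until len first exceeds 1024, i.e.
 * after about seven lists.  kupdate_transactions then writes blocks across
 * all seven lists, and the single flush_journal_list call on the last one
 * updates the header block once instead of seven times.
 */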
/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
			 struct reiserfs_journal_cnode **table,
			 struct reiserfs_journal_list *jl,
			 unsigned long block, int remove_freed)
{
	struct reiserfs_journal_cnode *cur;
	struct reiserfs_journal_cnode **head;

	head = &(journal_hash(table, sb, block));
	if (!head) {
		return;
	}
	cur = *head;
	while (cur) {
		if (cur->blocknr == block && cur->sb == sb
		    && (jl == NULL || jl == cur->jlist)
		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
			if (cur->hnext) {
				cur->hnext->hprev = cur->hprev;
			}
			if (cur->hprev) {
				cur->hprev->hnext = cur->hnext;
			} else {
				*head = cur->hnext;
			}
			cur->blocknr = 0;
			cur->sb = NULL;
			cur->state = 0;
			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
				atomic_dec(&(cur->jlist->j_nonzerolen));
			cur->bh = NULL;
			cur->jlist = NULL;
		}
		cur = cur->hnext;
	}
}

static void free_journal_ram(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	kfree(journal->j_current_jl);
	journal->j_num_lists--;

	vfree(journal->j_cnode_free_orig);
	free_list_bitmaps(p_s_sb, journal->j_list_bitmap);
	free_bitmap_nodes(p_s_sb);	/* must be after free_list_bitmaps */
	if (journal->j_header_bh) {
		brelse(journal->j_header_bh);
	}
	/* j_header_bh is on the journal dev, make sure not to release the journal
	 * dev until we brelse j_header_bh
	 */
	release_journal_dev(p_s_sb, journal);
	vfree(journal);
}

/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
			      struct super_block *p_s_sb, int error)
{
	struct reiserfs_transaction_handle myth;
	int flushed = 0;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	/* we only want to flush out transactions if we were called with error == 0
	 */
	if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
		/* end the current trans */
		BUG_ON(!th->t_trans_id);
		do_journal_end(th, p_s_sb, 10, FLUSH_ALL);

		/* make sure something gets logged to force our way into the flush code */
		if (!journal_join(&myth, p_s_sb, 1)) {
			reiserfs_prepare_for_journal(p_s_sb,
						     SB_BUFFER_WITH_SB(p_s_sb),
						     1);
			journal_mark_dirty(&myth, p_s_sb,
					   SB_BUFFER_WITH_SB(p_s_sb));
			do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
			flushed = 1;
		}
	}

	/* this also catches errors during the do_journal_end above */
	if (!error && reiserfs_is_journal_aborted(journal)) {
		memset(&myth, 0, sizeof(myth));
		if (!journal_join_abort(&myth, p_s_sb, 1)) {
			reiserfs_prepare_for_journal(p_s_sb,
						     SB_BUFFER_WITH_SB(p_s_sb),
						     1);
			journal_mark_dirty(&myth, p_s_sb,
					   SB_BUFFER_WITH_SB(p_s_sb));
			do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
		}
	}

	reiserfs_mounted_fs_count--;
	/* wait for all commits to finish */
	cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
	flush_workqueue(commit_wq);
	if (!reiserfs_mounted_fs_count) {
		destroy_workqueue(commit_wq);
		commit_wq = NULL;
	}

	free_journal_ram(p_s_sb);

	return 0;
}

/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th,
		    struct super_block *p_s_sb)
{
	return do_journal_release(th, p_s_sb, 0);
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
			  struct super_block *p_s_sb)
{
	return do_journal_release(th, p_s_sb, 1);
}

/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *p_s_sb,
				       struct reiserfs_journal_desc *desc,
				       struct reiserfs_journal_commit *commit)
{
	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0) {
		return 1;
	}
	return 0;
}
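/*
 * On-disk layout being validated here (illustrative diagram, not from the
 * original source).  A transaction occupies a contiguous, wrapping run of
 * log blocks:
 *
 *   [ desc | logged block 1 ... logged block N | commit ]
 *      ^                                          ^
 *      get_desc_trans_id/len                      get_commit_trans_id/len
 *
 * Replay only trusts a transaction when the commit block, found at
 * (desc offset + trans_len + 1) % journal size, echoes the descriptor's
 * trans_id and length back; a crash mid-write leaves a stale commit block
 * there, and journal_compare_desc_commit rejects it.
 */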
1992 /* returns 0 if it did not find a description block
1993 ** returns -1 if it found a corrupt commit block
1994 ** returns 1 if both desc and commit were valid
1996 static int journal_transaction_is_valid(struct super_block *p_s_sb,
1997 struct buffer_head *d_bh,
1998 unsigned long *oldest_invalid_trans_id,
1999 unsigned long *newest_mount_id)
2001 struct reiserfs_journal_desc *desc;
2002 struct reiserfs_journal_commit *commit;
2003 struct buffer_head *c_bh;
2004 unsigned long offset;
2006 if (!d_bh)
2007 return 0;
2009 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2010 if (get_desc_trans_len(desc) > 0
2011 && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
2012 if (oldest_invalid_trans_id && *oldest_invalid_trans_id
2013 && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
2014 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2015 "journal-986: transaction "
2016 "is valid returning because trans_id %d is greater than "
2017 "oldest_invalid %lu",
2018 get_desc_trans_id(desc),
2019 *oldest_invalid_trans_id);
2020 return 0;
2022 if (newest_mount_id
2023 && *newest_mount_id > get_desc_mount_id(desc)) {
2024 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2025 "journal-1087: transaction "
2026 "is valid returning because mount_id %d is less than "
2027 "newest_mount_id %lu",
2028 get_desc_mount_id(desc),
2029 *newest_mount_id);
2030 return -1;
2032 if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
2033 reiserfs_warning(p_s_sb,
2034 "journal-2018: Bad transaction length %d encountered, ignoring transaction",
2035 get_desc_trans_len(desc));
2036 return -1;
2038 offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2040 /* ok, we have a journal description block, let's see if the transaction was valid */
2041 c_bh =
2042 journal_bread(p_s_sb,
2043 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2044 ((offset + get_desc_trans_len(desc) +
2045 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2046 if (!c_bh)
2047 return 0;
2048 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2049 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2050 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2051 "journal_transaction_is_valid, commit offset %ld had bad "
2052 "time %d or length %d",
2053 c_bh->b_blocknr -
2054 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2055 get_commit_trans_id(commit),
2056 get_commit_trans_len(commit));
2057 brelse(c_bh);
2058 if (oldest_invalid_trans_id) {
2059 *oldest_invalid_trans_id =
2060 get_desc_trans_id(desc);
2061 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2062 "journal-1004: "
2063 "transaction_is_valid setting oldest invalid trans_id "
2064 "to %d",
2065 get_desc_trans_id(desc));
2067 return -1;
2069 brelse(c_bh);
2070 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2071 "journal-1006: found valid "
2072 "transaction start offset %llu, len %d id %d",
2073 d_bh->b_blocknr -
2074 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2075 get_desc_trans_len(desc),
2076 get_desc_trans_id(desc));
2077 return 1;
2078 } else {
2079 return 0;
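/* Editor's sketch (not in the original source): how a caller interprets the
** three return values documented above.
*/
#if 0	/* illustrative only, not compiled */
	ret = journal_transaction_is_valid(sb, d_bh, &oldest_invalid, &newest);
	if (ret == 1) {
		/* desc and commit both checked out: candidate for replay */
	} else if (ret == -1) {
		/* corrupt or mismatched commit block: remember it as invalid */
	} else {
		/* no desc block here at all: advance one block, or stop */
	}
#endif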
2083 static void brelse_array(struct buffer_head **heads, int num)
2085 int i;
2086 for (i = 0; i < num; i++) {
2087 brelse(heads[i]);
2092 ** given the start, and values for the oldest acceptable transactions,
2093 ** this either reads in and replays a transaction, or returns because the transaction
2094 ** is invalid, or too old.
2096 static int journal_read_transaction(struct super_block *p_s_sb,
2097 unsigned long cur_dblock,
2098 unsigned long oldest_start,
2099 unsigned long oldest_trans_id,
2100 unsigned long newest_mount_id)
2102 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2103 struct reiserfs_journal_desc *desc;
2104 struct reiserfs_journal_commit *commit;
2105 unsigned long trans_id = 0;
2106 struct buffer_head *c_bh;
2107 struct buffer_head *d_bh;
2108 struct buffer_head **log_blocks = NULL;
2109 struct buffer_head **real_blocks = NULL;
2110 unsigned long trans_offset;
2111 int i;
2112 int trans_half;
2114 d_bh = journal_bread(p_s_sb, cur_dblock);
2115 if (!d_bh)
2116 return 1;
2117 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2118 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2119 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
2120 "journal_read_transaction, offset %llu, len %d mount_id %d",
2121 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2122 get_desc_trans_len(desc), get_desc_mount_id(desc));
2123 if (get_desc_trans_id(desc) < oldest_trans_id) {
2124 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
2125 "journal_read_trans skipping because %lu is too old",
2126 cur_dblock -
2127 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2128 brelse(d_bh);
2129 return 1;
2131 if (get_desc_mount_id(desc) != newest_mount_id) {
2132 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
2133 "journal_read_trans skipping because %d is != "
2134 "newest_mount_id %lu", get_desc_mount_id(desc),
2135 newest_mount_id);
2136 brelse(d_bh);
2137 return 1;
2139 c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2140 ((trans_offset + get_desc_trans_len(desc) + 1) %
2141 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2142 if (!c_bh) {
2143 brelse(d_bh);
2144 return 1;
2146 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2147 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2148 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2149 "journal_read_transaction, "
2150 "commit offset %llu had bad time %d or length %d",
2151 c_bh->b_blocknr -
2152 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2153 get_commit_trans_id(commit),
2154 get_commit_trans_len(commit));
2155 brelse(c_bh);
2156 brelse(d_bh);
2157 return 1;
2159 trans_id = get_desc_trans_id(desc);
2160 /* now we know we've got a good transaction, and it was inside the valid time ranges */
2161 log_blocks = kmalloc(get_desc_trans_len(desc) *
2162 sizeof(struct buffer_head *), GFP_NOFS);
2163 real_blocks = kmalloc(get_desc_trans_len(desc) *
2164 sizeof(struct buffer_head *), GFP_NOFS);
2165 if (!log_blocks || !real_blocks) {
2166 brelse(c_bh);
2167 brelse(d_bh);
2168 kfree(log_blocks);
2169 kfree(real_blocks);
2170 reiserfs_warning(p_s_sb,
2171 "journal-1169: kmalloc failed, unable to mount FS");
2172 return -1;
2174 /* get all the buffer heads */
2175 trans_half = journal_trans_half(p_s_sb->s_blocksize);
2176 for (i = 0; i < get_desc_trans_len(desc); i++) {
2177 log_blocks[i] =
2178 journal_getblk(p_s_sb,
2179 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2180 (trans_offset + 1 +
2181 i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2182 if (i < trans_half) {
2183 real_blocks[i] =
2184 sb_getblk(p_s_sb,
2185 le32_to_cpu(desc->j_realblock[i]));
2186 } else {
2187 real_blocks[i] =
2188 sb_getblk(p_s_sb,
2189 le32_to_cpu(commit->
2190 j_realblock[i - trans_half]));
2192 if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
2193 reiserfs_warning(p_s_sb,
2194 "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
2195 goto abort_replay;
2197 /* make sure we don't try to replay onto log or reserved area */
2198 if (is_block_in_log_or_reserved_area
2199 (p_s_sb, real_blocks[i]->b_blocknr)) {
2200 reiserfs_warning(p_s_sb,
2201 "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block");
2202 abort_replay:
2203 brelse_array(log_blocks, i);
2204 brelse_array(real_blocks, i);
2205 brelse(c_bh);
2206 brelse(d_bh);
2207 kfree(log_blocks);
2208 kfree(real_blocks);
2209 return -1;
2212 /* read in the log blocks, memcpy to the corresponding real block */
2213 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2214 for (i = 0; i < get_desc_trans_len(desc); i++) {
2215 wait_on_buffer(log_blocks[i]);
2216 if (!buffer_uptodate(log_blocks[i])) {
2217 reiserfs_warning(p_s_sb,
2218 "journal-1212: REPLAY FAILURE fsck required! buffer write failed");
2219 brelse_array(log_blocks + i,
2220 get_desc_trans_len(desc) - i);
2221 brelse_array(real_blocks, get_desc_trans_len(desc));
2222 brelse(c_bh);
2223 brelse(d_bh);
2224 kfree(log_blocks);
2225 kfree(real_blocks);
2226 return -1;
2228 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2229 real_blocks[i]->b_size);
2230 set_buffer_uptodate(real_blocks[i]);
2231 brelse(log_blocks[i]);
2233 /* flush out the real blocks */
2234 for (i = 0; i < get_desc_trans_len(desc); i++) {
2235 set_buffer_dirty(real_blocks[i]);
2236 ll_rw_block(SWRITE, 1, real_blocks + i);
2238 for (i = 0; i < get_desc_trans_len(desc); i++) {
2239 wait_on_buffer(real_blocks[i]);
2240 if (!buffer_uptodate(real_blocks[i])) {
2241 reiserfs_warning(p_s_sb,
2242 "journal-1226: REPLAY FAILURE, fsck required! buffer write failed");
2243 brelse_array(real_blocks + i,
2244 get_desc_trans_len(desc) - i);
2245 brelse(c_bh);
2246 brelse(d_bh);
2247 kfree(log_blocks);
2248 kfree(real_blocks);
2249 return -1;
2251 brelse(real_blocks[i]);
2253 cur_dblock =
2254 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2255 ((trans_offset + get_desc_trans_len(desc) +
2256 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2257 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2258 "journal-1095: setting journal " "start to offset %ld",
2259 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2261 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2262 journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2263 journal->j_last_flush_trans_id = trans_id;
2264 journal->j_trans_id = trans_id + 1;
2265 /* check for trans_id overflow */
2266 if (journal->j_trans_id == 0)
2267 journal->j_trans_id = 10;
2268 brelse(c_bh);
2269 brelse(d_bh);
2270 kfree(log_blocks);
2271 kfree(real_blocks);
2272 return 0;
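/* Editor's note (worked example, not in the original source): the replayed
** transaction occupies len + 2 journal blocks (desc + len data + commit),
** which is why the next start is (trans_offset + len + 2) % journal_size.
** E.g. a desc at offset 100 with len 10 leaves the new journal start at 112.
*/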
2275 /* This function reads blocks starting at block and running up to max_block,
2276 each of bufsize size (but no more than BUFNR blocks at a time). This proved
2277 to improve mounting speed, on self-rebuilding raid5 arrays at least.
2278 Right now it is only used from journal code. But later we might use it
2279 from other places.
2280 Note: Do not use journal_getblk/sb_getblk functions here! */
2281 static struct buffer_head *reiserfs_breada(struct block_device *dev, int block,
2282 int bufsize, unsigned int max_block)
2284 struct buffer_head *bhlist[BUFNR];
2285 unsigned int blocks = BUFNR;
2286 struct buffer_head *bh;
2287 int i, j;
2289 bh = __getblk(dev, block, bufsize);
2290 if (buffer_uptodate(bh))
2291 return (bh);
2293 if (block + BUFNR > max_block) {
2294 blocks = max_block - block;
2296 bhlist[0] = bh;
2297 j = 1;
2298 for (i = 1; i < blocks; i++) {
2299 bh = __getblk(dev, block + i, bufsize);
2300 if (buffer_uptodate(bh)) {
2301 brelse(bh);
2302 break;
2303 } else
2304 bhlist[j++] = bh;
2306 ll_rw_block(READ, j, bhlist);
2307 for (i = 1; i < j; i++)
2308 brelse(bhlist[i]);
2309 bh = bhlist[0];
2310 wait_on_buffer(bh);
2311 if (buffer_uptodate(bh))
2312 return bh;
2313 brelse(bh);
2314 return NULL;
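/* Editor's note (worked example, not in the original source): the clamp
** above keeps read-ahead inside the journal. With BUFNR == 64, a call at
** block == max_block - 3 submits at most 3 buffers instead of 64, and the
** batching loop also stops early at the first already-uptodate buffer.
*/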
2318 ** read and replay the log
2319 ** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
2320 ** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast.
2322 ** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
2324 ** On exit, it sets things up so the first transaction will work correctly.
2326 static int journal_read(struct super_block *p_s_sb)
2328 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2329 struct reiserfs_journal_desc *desc;
2330 unsigned long oldest_trans_id = 0;
2331 unsigned long oldest_invalid_trans_id = 0;
2332 time_t start;
2333 unsigned long oldest_start = 0;
2334 unsigned long cur_dblock = 0;
2335 unsigned long newest_mount_id = 9;
2336 struct buffer_head *d_bh;
2337 struct reiserfs_journal_header *jh;
2338 int valid_journal_header = 0;
2339 int replay_count = 0;
2340 int continue_replay = 1;
2341 int ret;
2342 char b[BDEVNAME_SIZE];
2344 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2345 reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
2346 bdevname(journal->j_dev_bd, b));
2347 start = get_seconds();
2349 /* step 1, read in the journal header block. Check the transaction it says
2350 ** is the first unflushed, and if that transaction is not valid,
2351 ** replay is done
2353 journal->j_header_bh = journal_bread(p_s_sb,
2354 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
2355 + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2356 if (!journal->j_header_bh) {
2357 return 1;
2359 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2360 if (le32_to_cpu(jh->j_first_unflushed_offset) <
2361 SB_ONDISK_JOURNAL_SIZE(p_s_sb)
2362 && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2363 oldest_start =
2364 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2365 le32_to_cpu(jh->j_first_unflushed_offset);
2366 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2367 newest_mount_id = le32_to_cpu(jh->j_mount_id);
2368 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2369 "journal-1153: found in "
2370 "header: first_unflushed_offset %d, last_flushed_trans_id "
2371 "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2372 le32_to_cpu(jh->j_last_flush_trans_id));
2373 valid_journal_header = 1;
2375 /* now, we try to read the first unflushed offset. If it is not valid,
2376 ** there is nothing more we can do, and it makes no sense to read
2377 ** through the whole log.
2379 d_bh =
2380 journal_bread(p_s_sb,
2381 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2382 le32_to_cpu(jh->j_first_unflushed_offset));
2383 ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
2384 if (!ret) {
2385 continue_replay = 0;
2387 brelse(d_bh);
2388 goto start_log_replay;
2391 if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
2392 reiserfs_warning(p_s_sb,
2393 "clm-2076: device is readonly, unable to replay log");
2394 return -1;
2397 /* ok, there are transactions that need to be replayed. start with the first log block, find
2398 ** all the valid transactions, and pick out the oldest.
2400 while (continue_replay
2401 && cur_dblock <
2402 (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2403 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
2404 /* Note that the blocksize of the primary fs device and the journal
2405 device are required to be the same */
2406 d_bh =
2407 reiserfs_breada(journal->j_dev_bd, cur_dblock,
2408 p_s_sb->s_blocksize,
2409 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2410 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2411 ret =
2412 journal_transaction_is_valid(p_s_sb, d_bh,
2413 &oldest_invalid_trans_id,
2414 &newest_mount_id);
2415 if (ret == 1) {
2416 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2417 if (oldest_start == 0) { /* init all oldest_ values */
2418 oldest_trans_id = get_desc_trans_id(desc);
2419 oldest_start = d_bh->b_blocknr;
2420 newest_mount_id = get_desc_mount_id(desc);
2421 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2422 "journal-1179: Setting "
2423 "oldest_start to offset %llu, trans_id %lu",
2424 oldest_start -
2425 SB_ONDISK_JOURNAL_1st_BLOCK
2426 (p_s_sb), oldest_trans_id);
2427 } else if (oldest_trans_id > get_desc_trans_id(desc)) {
2428 /* one we just read was older */
2429 oldest_trans_id = get_desc_trans_id(desc);
2430 oldest_start = d_bh->b_blocknr;
2431 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2432 "journal-1180: Resetting "
2433 "oldest_start to offset %lu, trans_id %lu",
2434 oldest_start -
2435 SB_ONDISK_JOURNAL_1st_BLOCK
2436 (p_s_sb), oldest_trans_id);
2438 if (newest_mount_id < get_desc_mount_id(desc)) {
2439 newest_mount_id = get_desc_mount_id(desc);
2440 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2441 "journal-1299: Setting "
2442 "newest_mount_id to %d",
2443 get_desc_mount_id(desc));
2445 cur_dblock += get_desc_trans_len(desc) + 2;
2446 } else {
2447 cur_dblock++;
2449 brelse(d_bh);
2452 start_log_replay:
2453 cur_dblock = oldest_start;
2454 if (oldest_trans_id) {
2455 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2456 "journal-1206: Starting replay "
2457 "from offset %llu, trans_id %lu",
2458 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2459 oldest_trans_id);
2462 replay_count = 0;
2463 while (continue_replay && oldest_trans_id > 0) {
2464 ret =
2465 journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
2466 oldest_trans_id, newest_mount_id);
2467 if (ret < 0) {
2468 return ret;
2469 } else if (ret != 0) {
2470 break;
2472 cur_dblock =
2473 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
2474 replay_count++;
2475 if (cur_dblock == oldest_start)
2476 break;
2479 if (oldest_trans_id == 0) {
2480 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2481 "journal-1225: No valid " "transactions found");
2483 /* j_start does not get set correctly if we don't replay any transactions.
2484 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2485 ** copy the trans_id from the header
2487 if (valid_journal_header && replay_count == 0) {
2488 journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2489 journal->j_trans_id =
2490 le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2491 /* check for trans_id overflow */
2492 if (journal->j_trans_id == 0)
2493 journal->j_trans_id = 10;
2494 journal->j_last_flush_trans_id =
2495 le32_to_cpu(jh->j_last_flush_trans_id);
2496 journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2497 } else {
2498 journal->j_mount_id = newest_mount_id + 1;
2500 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2501 "newest_mount_id to %lu", journal->j_mount_id);
2502 journal->j_first_unflushed_offset = journal->j_start;
2503 if (replay_count > 0) {
2504 reiserfs_info(p_s_sb,
2505 "replayed %d transactions in %lu seconds\n",
2506 replay_count, get_seconds() - start);
2508 if (!bdev_read_only(p_s_sb->s_bdev) &&
2509 _update_journal_header_block(p_s_sb, journal->j_start,
2510 journal->j_last_flush_trans_id)) {
2511 /* replay failed, caller must call free_journal_ram and abort
2512 ** the mount
2514 return -1;
2516 return 0;
2519 static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2521 struct reiserfs_journal_list *jl;
2522 jl = kzalloc(sizeof(struct reiserfs_journal_list),
2523 GFP_NOFS | __GFP_NOFAIL);
2524 INIT_LIST_HEAD(&jl->j_list);
2525 INIT_LIST_HEAD(&jl->j_working_list);
2526 INIT_LIST_HEAD(&jl->j_tail_bh_list);
2527 INIT_LIST_HEAD(&jl->j_bh_list);
2528 sema_init(&jl->j_commit_lock, 1);
2529 SB_JOURNAL(s)->j_num_lists++;
2530 get_journal_list(jl);
2531 return jl;
2534 static void journal_list_init(struct super_block *p_s_sb)
2536 SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
2539 static int release_journal_dev(struct super_block *super,
2540 struct reiserfs_journal *journal)
2542 int result;
2544 result = 0;
2546 if (journal->j_dev_file != NULL) {
2547 result = filp_close(journal->j_dev_file, NULL);
2548 journal->j_dev_file = NULL;
2549 journal->j_dev_bd = NULL;
2550 } else if (journal->j_dev_bd != NULL) {
2551 result = blkdev_put(journal->j_dev_bd);
2552 journal->j_dev_bd = NULL;
2555 if (result != 0) {
2556 reiserfs_warning(super,
2557 "sh-457: release_journal_dev: Cannot release journal device: %i",
2558 result);
2560 return result;
2563 static int journal_init_dev(struct super_block *super,
2564 struct reiserfs_journal *journal,
2565 const char *jdev_name)
2567 int result;
2568 dev_t jdev;
2569 int blkdev_mode = FMODE_READ | FMODE_WRITE;
2570 char b[BDEVNAME_SIZE];
2572 result = 0;
2574 journal->j_dev_bd = NULL;
2575 journal->j_dev_file = NULL;
2576 jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2577 new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2579 if (bdev_read_only(super->s_bdev))
2580 blkdev_mode = FMODE_READ;
2582 /* no "jdev" mount option was given; open the journal device recorded in the super block (it may still be separate) */
2583 if ((!jdev_name || !jdev_name[0])) {
2584 journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
2585 if (IS_ERR(journal->j_dev_bd)) {
2586 result = PTR_ERR(journal->j_dev_bd);
2587 journal->j_dev_bd = NULL;
2588 reiserfs_warning(super, "sh-458: journal_init_dev: "
2589 "cannot init journal device '%s': %i",
2590 __bdevname(jdev, b), result);
2591 return result;
2592 } else if (jdev != super->s_dev)
2593 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2594 return 0;
2597 journal->j_dev_file = filp_open(jdev_name, 0, 0);
2598 if (!IS_ERR(journal->j_dev_file)) {
2599 struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
2600 if (!S_ISBLK(jdev_inode->i_mode)) {
2601 reiserfs_warning(super, "journal_init_dev: '%s' is "
2602 "not a block device", jdev_name);
2603 result = -ENOTBLK;
2604 release_journal_dev(super, journal);
2605 } else {
2606 /* ok */
2607 journal->j_dev_bd = I_BDEV(jdev_inode);
2608 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2609 reiserfs_info(super,
2610 "journal_init_dev: journal device: %s\n",
2611 bdevname(journal->j_dev_bd, b));
2613 } else {
2614 result = PTR_ERR(journal->j_dev_file);
2615 journal->j_dev_file = NULL;
2616 reiserfs_warning(super,
2617 "journal_init_dev: Cannot open '%s': %i",
2618 jdev_name, result);
2620 return result;
2624 ** must be called once on fs mount. calls journal_read for you
2626 int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2627 int old_format, unsigned int commit_max_age)
2629 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
2630 struct buffer_head *bhjh;
2631 struct reiserfs_super_block *rs;
2632 struct reiserfs_journal_header *jh;
2633 struct reiserfs_journal *journal;
2634 struct reiserfs_journal_list *jl;
2635 char b[BDEVNAME_SIZE];
2637 journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
2638 if (!journal) {
2639 reiserfs_warning(p_s_sb,
2640 "journal-1256: unable to get memory for journal structure");
2641 return 1;
2643 memset(journal, 0, sizeof(struct reiserfs_journal));
2644 INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2645 INIT_LIST_HEAD(&journal->j_prealloc_list);
2646 INIT_LIST_HEAD(&journal->j_working_list);
2647 INIT_LIST_HEAD(&journal->j_journal_list);
2648 journal->j_persistent_trans = 0;
2649 if (reiserfs_allocate_list_bitmaps(p_s_sb,
2650 journal->j_list_bitmap,
2651 SB_BMAP_NR(p_s_sb)))
2652 goto free_and_return;
2653 allocate_bitmap_nodes(p_s_sb);
2655 /* reserved for journal area support */
2656 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
2657 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2658 / p_s_sb->s_blocksize +
2659 SB_BMAP_NR(p_s_sb) +
2661 REISERFS_DISK_OFFSET_IN_BYTES /
2662 p_s_sb->s_blocksize + 2);
2664 /* Sanity check to see if the standard journal fits within the first bitmap
2665 block (relevant for small blocksizes) */
2666 if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
2667 (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
2668 SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
2669 reiserfs_warning(p_s_sb,
2670 "journal-1393: journal does not fit for area "
2671 "addressed by first of bitmap blocks. It starts at "
2672 "%u and its size is %u. Block size %ld",
2673 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
2674 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2675 p_s_sb->s_blocksize);
2676 goto free_and_return;
2679 if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
2680 reiserfs_warning(p_s_sb,
2681 "sh-462: unable to initialize jornal device");
2682 goto free_and_return;
2685 rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2687 /* read journal header */
2688 bhjh = journal_bread(p_s_sb,
2689 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2690 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2691 if (!bhjh) {
2692 reiserfs_warning(p_s_sb,
2693 "sh-459: unable to read journal header");
2694 goto free_and_return;
2696 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2698 /* make sure the journal matches the super block */
2699 if (is_reiserfs_jr(rs)
2700 && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2701 sb_jp_journal_magic(rs))) {
2702 reiserfs_warning(p_s_sb,
2703 "sh-460: journal header magic %x "
2704 "(device %s) does not match to magic found in super "
2705 "block %x", jh->jh_journal.jp_journal_magic,
2706 bdevname(journal->j_dev_bd, b),
2707 sb_jp_journal_magic(rs));
2708 brelse(bhjh);
2709 goto free_and_return;
2712 journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2713 journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2714 journal->j_max_commit_age =
2715 le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2716 journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2718 if (journal->j_trans_max) {
2719 /* make sure these parameters are sane; assign defaults if they are not */
2720 __u32 initial = journal->j_trans_max;
2721 __u32 ratio = 1;
2723 if (p_s_sb->s_blocksize < 4096)
2724 ratio = 4096 / p_s_sb->s_blocksize;
2726 if (SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
2727 JOURNAL_MIN_RATIO)
2728 journal->j_trans_max =
2729 SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
2730 if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
2731 journal->j_trans_max =
2732 JOURNAL_TRANS_MAX_DEFAULT / ratio;
2733 if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
2734 journal->j_trans_max =
2735 JOURNAL_TRANS_MIN_DEFAULT / ratio;
2737 if (journal->j_trans_max != initial)
2738 reiserfs_warning(p_s_sb,
2739 "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
2740 initial, journal->j_trans_max);
2742 journal->j_max_batch = journal->j_trans_max *
2743 JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
2746 if (!journal->j_trans_max) {
2747 /* the file system was created by an old version of mkreiserfs,
2748 so this field contains zero */
2749 journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2750 journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2751 journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2753 /* for blocksize >= 4096 the max transaction size is 1024. For blocksize < 4096
2754 the max transaction size is decreased proportionally */
2755 if (p_s_sb->s_blocksize < 4096) {
2756 journal->j_trans_max /= (4096 / p_s_sb->s_blocksize);
2757 journal->j_max_batch = (journal->j_trans_max) * 9 / 10;
2761 journal->j_default_max_commit_age = journal->j_max_commit_age;
2763 if (commit_max_age != 0) {
2764 journal->j_max_commit_age = commit_max_age;
2765 journal->j_max_trans_age = commit_max_age;
2768 reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
2769 "journal first block %u, max trans len %u, max batch %u, "
2770 "max commit age %u, max trans age %u\n",
2771 bdevname(journal->j_dev_bd, b),
2772 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2773 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2774 journal->j_trans_max,
2775 journal->j_max_batch,
2776 journal->j_max_commit_age, journal->j_max_trans_age);
2778 brelse(bhjh);
2780 journal->j_list_bitmap_index = 0;
2781 journal_list_init(p_s_sb);
2783 memset(journal->j_list_hash_table, 0,
2784 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2786 INIT_LIST_HEAD(&journal->j_dirty_buffers);
2787 spin_lock_init(&journal->j_dirty_buffers_lock);
2789 journal->j_start = 0;
2790 journal->j_len = 0;
2791 journal->j_len_alloc = 0;
2792 atomic_set(&(journal->j_wcount), 0);
2793 atomic_set(&(journal->j_async_throttle), 0);
2794 journal->j_bcount = 0;
2795 journal->j_trans_start_time = 0;
2796 journal->j_last = NULL;
2797 journal->j_first = NULL;
2798 init_waitqueue_head(&(journal->j_join_wait));
2799 sema_init(&journal->j_lock, 1);
2800 sema_init(&journal->j_flush_sem, 1);
2802 journal->j_trans_id = 10;
2803 journal->j_mount_id = 10;
2804 journal->j_state = 0;
2805 atomic_set(&(journal->j_jlock), 0);
2806 journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2807 journal->j_cnode_free_orig = journal->j_cnode_free_list;
2808 journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2809 journal->j_cnode_used = 0;
2810 journal->j_must_wait = 0;
2812 if (journal->j_cnode_free == 0) {
2813 reiserfs_warning(p_s_sb, "journal-2004: Journal cnode memory "
2814 "allocation failed (%ld bytes). Journal is "
2815 "too large for available memory. Usually "
2816 "this is due to a journal that is too large.",
2817 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2818 goto free_and_return;
2821 init_journal_hash(p_s_sb);
2822 jl = journal->j_current_jl;
2823 jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
2824 if (!jl->j_list_bitmap) {
2825 reiserfs_warning(p_s_sb,
2826 "journal-2005, get_list_bitmap failed for journal list 0");
2827 goto free_and_return;
2829 if (journal_read(p_s_sb) < 0) {
2830 reiserfs_warning(p_s_sb, "Replay Failure, unable to mount");
2831 goto free_and_return;
2834 reiserfs_mounted_fs_count++;
2835 if (reiserfs_mounted_fs_count <= 1)
2836 commit_wq = create_workqueue("reiserfs");
2838 INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
2839 return 0;
2840 free_and_return:
2841 free_journal_ram(p_s_sb);
2842 return 1;
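/* Editor's sketch (not in the original source): roughly how mount-time code
** calls journal_init(). The caller context and error value are assumptions;
** on failure journal_init() has already freed its own ram.
*/
#if 0	/* illustrative only, not compiled */
	if (journal_init(s, jdev_name, old_format, commit_max_age)) {
		/* free_journal_ram() was already called internally */
		return -EINVAL;
	}
#endif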
2846 ** test for a polite end of the current transaction. Used by file_write, and should
2847 ** be used by delete to make sure they don't write more than can fit inside a single
2848 ** transaction
2850 int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2851 int new_alloc)
2853 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2854 time_t now = get_seconds();
2855 /* cannot restart while nested */
2856 BUG_ON(!th->t_trans_id);
2857 if (th->t_refcount > 1)
2858 return 0;
2859 if (journal->j_must_wait > 0 ||
2860 (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2861 atomic_read(&(journal->j_jlock)) ||
2862 (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2863 journal->j_cnode_free < (journal->j_trans_max * 3)) {
2864 return 1;
2866 /* protected by the BKL here */
2867 journal->j_len_alloc += new_alloc;
2868 th->t_blocks_allocated += new_alloc;
2869 return 0;
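/* Editor's sketch (not in the original source): the intended calling pattern
** for journal_transaction_should_end() in a long-running write. The restart
** helper shown is hypothetical; reiserfs keeps an equivalent in its
** file/inode code.
*/
#if 0	/* illustrative only, not compiled */
	if (journal_transaction_should_end(th, blocks_needed)) {
		/* politely end this trans and start a fresh one */
		err = example_restart_transaction(th, inode, blocks_needed);
		if (err)
			goto out;
	}
#endif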
2872 /* this must be called inside a transaction, and requires the
2873 ** kernel_lock to be held
2875 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2877 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2878 BUG_ON(!th->t_trans_id);
2879 journal->j_must_wait = 1;
2880 set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2881 return;
2884 /* this must be called without a transaction started, and does not
2885 ** require BKL
2887 void reiserfs_allow_writes(struct super_block *s)
2889 struct reiserfs_journal *journal = SB_JOURNAL(s);
2890 clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2891 wake_up(&journal->j_join_wait);
2894 /* this must be called without a transaction started, and does not
2895 ** require BKL
2897 void reiserfs_wait_on_write_block(struct super_block *s)
2899 struct reiserfs_journal *journal = SB_JOURNAL(s);
2900 wait_event(journal->j_join_wait,
2901 !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
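/* Editor's sketch (not in the original source): the three routines above
** pair up to quiesce the log. The sequencing is illustrative only;
** journal_end_sync() is defined later in this file.
*/
#if 0	/* illustrative only, not compiled */
	reiserfs_block_writes(&th);	/* inside a trans, sets j_must_wait */
	journal_end_sync(&th, s, 1);	/* push the commit to disk */
	/* ... log is quiet; new writers sleep in
	 * reiserfs_wait_on_write_block() ... */
	reiserfs_allow_writes(s);	/* wakes them via j_join_wait */
#endif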
2904 static void queue_log_writer(struct super_block *s)
2906 wait_queue_t wait;
2907 struct reiserfs_journal *journal = SB_JOURNAL(s);
2908 set_bit(J_WRITERS_QUEUED, &journal->j_state);
2911 * we don't want to use wait_event here because
2912 * we only want to wait once.
2914 init_waitqueue_entry(&wait, current);
2915 add_wait_queue(&journal->j_join_wait, &wait);
2916 set_current_state(TASK_UNINTERRUPTIBLE);
2917 if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
2918 schedule();
2919 current->state = TASK_RUNNING;
2920 remove_wait_queue(&journal->j_join_wait, &wait);
2923 static void wake_queued_writers(struct super_block *s)
2925 struct reiserfs_journal *journal = SB_JOURNAL(s);
2926 if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2927 wake_up(&journal->j_join_wait);
2930 static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
2932 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2933 unsigned long bcount = journal->j_bcount;
2934 while (1) {
2935 schedule_timeout_uninterruptible(1);
2936 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2937 while ((atomic_read(&journal->j_wcount) > 0 ||
2938 atomic_read(&journal->j_jlock)) &&
2939 journal->j_trans_id == trans_id) {
2940 queue_log_writer(sb);
2942 if (journal->j_trans_id != trans_id)
2943 break;
2944 if (bcount == journal->j_bcount)
2945 break;
2946 bcount = journal->j_bcount;
2950 /* join == true if you must join an existing transaction.
2951 ** join == false if you can deal with waiting for others to finish
2953 ** this will block until the transaction is joinable. send the number of blocks you
2954 ** expect to use in nblocks.
2956 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
2957 struct super_block *p_s_sb, unsigned long nblocks,
2958 int join)
2960 time_t now = get_seconds();
2961 int old_trans_id;
2962 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2963 struct reiserfs_transaction_handle myth;
2964 int sched_count = 0;
2965 int retval;
2967 reiserfs_check_lock_depth(p_s_sb, "journal_begin");
2968 BUG_ON(nblocks > journal->j_trans_max);
2970 PROC_INFO_INC(p_s_sb, journal.journal_being);
2971 /* set here for journal_join */
2972 th->t_refcount = 1;
2973 th->t_super = p_s_sb;
2975 relock:
2976 lock_journal(p_s_sb);
2977 if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
2978 unlock_journal(p_s_sb);
2979 retval = journal->j_errno;
2980 goto out_fail;
2982 journal->j_bcount++;
2984 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
2985 unlock_journal(p_s_sb);
2986 reiserfs_wait_on_write_block(p_s_sb);
2987 PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
2988 goto relock;
2990 now = get_seconds();
2992 /* if there is no room in the journal OR
2993 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
2994 ** we don't sleep if there aren't other writers
2997 if ((!join && journal->j_must_wait > 0) ||
2998 (!join
2999 && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
3000 || (!join && atomic_read(&journal->j_wcount) > 0
3001 && journal->j_trans_start_time > 0
3002 && (now - journal->j_trans_start_time) >
3003 journal->j_max_trans_age) || (!join
3004 && atomic_read(&journal->j_jlock))
3005 || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
3007 old_trans_id = journal->j_trans_id;
3008 unlock_journal(p_s_sb); /* allow others to finish this transaction */
3010 if (!join && (journal->j_len_alloc + nblocks + 2) >=
3011 journal->j_max_batch &&
3012 ((journal->j_len + nblocks + 2) * 100) <
3013 (journal->j_len_alloc * 75)) {
3014 if (atomic_read(&journal->j_wcount) > 10) {
3015 sched_count++;
3016 queue_log_writer(p_s_sb);
3017 goto relock;
3020 /* don't mess with joining the transaction if all we have to do is
3021 * wait for someone else to do a commit
3023 if (atomic_read(&journal->j_jlock)) {
3024 while (journal->j_trans_id == old_trans_id &&
3025 atomic_read(&journal->j_jlock)) {
3026 queue_log_writer(p_s_sb);
3028 goto relock;
3030 retval = journal_join(&myth, p_s_sb, 1);
3031 if (retval)
3032 goto out_fail;
3034 /* someone might have ended the transaction while we joined */
3035 if (old_trans_id != journal->j_trans_id) {
3036 retval = do_journal_end(&myth, p_s_sb, 1, 0);
3037 } else {
3038 retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
3041 if (retval)
3042 goto out_fail;
3044 PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
3045 goto relock;
3047 /* we are the first writer, set trans_id */
3048 if (journal->j_trans_start_time == 0) {
3049 journal->j_trans_start_time = get_seconds();
3051 atomic_inc(&(journal->j_wcount));
3052 journal->j_len_alloc += nblocks;
3053 th->t_blocks_logged = 0;
3054 th->t_blocks_allocated = nblocks;
3055 th->t_trans_id = journal->j_trans_id;
3056 unlock_journal(p_s_sb);
3057 INIT_LIST_HEAD(&th->t_list);
3058 get_fs_excl();
3059 return 0;
3061 out_fail:
3062 memset(th, 0, sizeof(*th));
3063 /* Re-set th->t_super, so we can properly keep track of how many
3064 * persistent transactions there are. We need to do this so if this
3065 * call is part of a failed restart_transaction, we can free it later */
3066 th->t_super = p_s_sb;
3067 return retval;
3070 struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3071 super_block
3073 int nblocks)
3075 int ret;
3076 struct reiserfs_transaction_handle *th;
3078 /* if we're nesting into an existing transaction, it will be
3079 ** persistent on its own
3081 if (reiserfs_transaction_running(s)) {
3082 th = current->journal_info;
3083 th->t_refcount++;
3084 BUG_ON(th->t_refcount < 2);
3086 return th;
3088 th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
3089 if (!th)
3090 return NULL;
3091 ret = journal_begin(th, s, nblocks);
3092 if (ret) {
3093 kfree(th);
3094 return NULL;
3097 SB_JOURNAL(s)->j_persistent_trans++;
3098 return th;
3101 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3103 struct super_block *s = th->t_super;
3104 int ret = 0;
3105 if (th->t_trans_id)
3106 ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3107 else
3108 ret = -EIO;
3109 if (th->t_refcount == 0) {
3110 SB_JOURNAL(s)->j_persistent_trans--;
3111 kfree(th);
3113 return ret;
3116 static int journal_join(struct reiserfs_transaction_handle *th,
3117 struct super_block *p_s_sb, unsigned long nblocks)
3119 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3121 /* this keeps do_journal_end from NULLing out the current->journal_info
3122 ** pointer
3124 th->t_handle_save = cur_th;
3125 BUG_ON(cur_th && cur_th->t_refcount > 1);
3126 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
3129 int journal_join_abort(struct reiserfs_transaction_handle *th,
3130 struct super_block *p_s_sb, unsigned long nblocks)
3132 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3134 /* this keeps do_journal_end from NULLing out the current->journal_info
3135 ** pointer
3137 th->t_handle_save = cur_th;
3138 BUG_ON(cur_th && cur_th->t_refcount > 1);
3139 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
3142 int journal_begin(struct reiserfs_transaction_handle *th,
3143 struct super_block *p_s_sb, unsigned long nblocks)
3145 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3146 int ret;
3148 th->t_handle_save = NULL;
3149 if (cur_th) {
3150 /* we are nesting into the current transaction */
3151 if (cur_th->t_super == p_s_sb) {
3152 BUG_ON(!cur_th->t_refcount);
3153 cur_th->t_refcount++;
3154 memcpy(th, cur_th, sizeof(*th));
3155 if (th->t_refcount <= 1)
3156 reiserfs_warning(p_s_sb,
3157 "BAD: refcount <= 1, but journal_info != 0");
3158 return 0;
3159 } else {
3160 /* we've ended up with a handle from a different filesystem.
3161 ** save it and restore on journal_end. This should never
3162 ** really happen...
3164 reiserfs_warning(p_s_sb,
3165 "clm-2100: nesting info a different FS");
3166 th->t_handle_save = current->journal_info;
3167 current->journal_info = th;
3169 } else {
3170 current->journal_info = th;
3172 ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
3173 BUG_ON(current->journal_info != th);
3175 /* I guess this boils down to being the reciprocal of clm-2100 above.
3176 * If do_journal_begin_r fails, we need to put it back, since journal_end
3177 * won't be called to do it. */
3178 if (ret)
3179 current->journal_info = th->t_handle_save;
3180 else
3181 BUG_ON(!th->t_refcount);
3183 return ret;
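/* Editor's sketch (not in the original source): what the nesting rules above
** mean for a caller on the same filesystem.
*/
#if 0	/* illustrative only, not compiled */
	journal_begin(&th1, sb, 10);	/* outer handle, t_refcount == 1 */
	journal_begin(&th2, sb, 10);	/* nests: th2 is a copy, refcount -> 2 */
	journal_end(&th2, sb, 10);	/* refcount -> 1, nothing committed yet */
	journal_end(&th1, sb, 10);	/* refcount hits 0, do_journal_end() runs */
#endif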
3187 ** puts bh into the current transaction. If it was already there, this removes the
3188 ** old pointers from the hash, and puts new ones in (to make sure replay happens in the right order).
3190 ** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
3191 ** transaction is committed.
3193 ** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to j_len + JOURNAL_PER_BALANCE_CNT.
3195 int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3196 struct super_block *p_s_sb, struct buffer_head *bh)
3198 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3199 struct reiserfs_journal_cnode *cn = NULL;
3200 int count_already_incd = 0;
3201 int prepared = 0;
3202 BUG_ON(!th->t_trans_id);
3204 PROC_INFO_INC(p_s_sb, journal.mark_dirty);
3205 if (th->t_trans_id != journal->j_trans_id) {
3206 reiserfs_panic(th->t_super,
3207 "journal-1577: handle trans id %ld != current trans id %ld\n",
3208 th->t_trans_id, journal->j_trans_id);
3211 p_s_sb->s_dirt = 1;
3213 prepared = test_clear_buffer_journal_prepared(bh);
3214 clear_buffer_journal_restore_dirty(bh);
3215 /* already in this transaction, we are done */
3216 if (buffer_journaled(bh)) {
3217 PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
3218 return 0;
3221 /* this must be turned into a panic instead of a warning. We can't allow
3222 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3223 ** could get to disk too early. NOT GOOD.
3225 if (!prepared || buffer_dirty(bh)) {
3226 reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state "
3227 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3228 (unsigned long long)bh->b_blocknr,
3229 prepared ? ' ' : '!',
3230 buffer_locked(bh) ? ' ' : '!',
3231 buffer_dirty(bh) ? ' ' : '!',
3232 buffer_journal_dirty(bh) ? ' ' : '!');
3235 if (atomic_read(&(journal->j_wcount)) <= 0) {
3236 reiserfs_warning(p_s_sb,
3237 "journal-1409: journal_mark_dirty returning because j_wcount was %d",
3238 atomic_read(&(journal->j_wcount)));
3239 return 1;
3241 /* this error means I've screwed up, and we've overflowed the transaction.
3242 ** Nothing can be done here, except make the FS readonly or panic.
3244 if (journal->j_len >= journal->j_trans_max) {
3245 reiserfs_panic(th->t_super,
3246 "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
3247 journal->j_len);
3250 if (buffer_journal_dirty(bh)) {
3251 count_already_incd = 1;
3252 PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
3253 clear_buffer_journal_dirty(bh);
3256 if (journal->j_len > journal->j_len_alloc) {
3257 journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3260 set_buffer_journaled(bh);
3262 /* now put this guy on the end */
3263 if (!cn) {
3264 cn = get_cnode(p_s_sb);
3265 if (!cn) {
3266 reiserfs_panic(p_s_sb, "get_cnode failed!\n");
3269 if (th->t_blocks_logged == th->t_blocks_allocated) {
3270 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3271 journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3273 th->t_blocks_logged++;
3274 journal->j_len++;
3276 cn->bh = bh;
3277 cn->blocknr = bh->b_blocknr;
3278 cn->sb = p_s_sb;
3279 cn->jlist = NULL;
3280 insert_journal_hash(journal->j_hash_table, cn);
3281 if (!count_already_incd) {
3282 get_bh(bh);
3285 cn->next = NULL;
3286 cn->prev = journal->j_last;
3287 cn->bh = bh;
3288 if (journal->j_last) {
3289 journal->j_last->next = cn;
3290 journal->j_last = cn;
3291 } else {
3292 journal->j_first = cn;
3293 journal->j_last = cn;
3295 return 0;
3298 int journal_end(struct reiserfs_transaction_handle *th,
3299 struct super_block *p_s_sb, unsigned long nblocks)
3301 if (!current->journal_info && th->t_refcount > 1)
3302 reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d",
3303 th->t_refcount);
3305 if (!th->t_trans_id) {
3306 WARN_ON(1);
3307 return -EIO;
3310 th->t_refcount--;
3311 if (th->t_refcount > 0) {
3312 struct reiserfs_transaction_handle *cur_th =
3313 current->journal_info;
3315 /* we aren't allowed to close a nested transaction on a different
3316 ** filesystem from the one in the task struct
3318 BUG_ON(cur_th->t_super != th->t_super);
3320 if (th != cur_th) {
3321 memcpy(current->journal_info, th, sizeof(*th));
3322 th->t_trans_id = 0;
3324 return 0;
3325 } else {
3326 return do_journal_end(th, p_s_sb, nblocks, 0);
3330 /* removes from the current transaction, brelse'ing and decrementing any counters.
3331 ** also files the removed buffer directly onto the clean list
3333 ** called by journal_mark_freed when a block has been deleted
3335 ** returns 1 if it cleaned and brelse'd the buffer. 0 otherwise
3337 static int remove_from_transaction(struct super_block *p_s_sb,
3338 b_blocknr_t blocknr, int already_cleaned)
3340 struct buffer_head *bh;
3341 struct reiserfs_journal_cnode *cn;
3342 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3343 int ret = 0;
3345 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3346 if (!cn || !cn->bh) {
3347 return ret;
3349 bh = cn->bh;
3350 if (cn->prev) {
3351 cn->prev->next = cn->next;
3353 if (cn->next) {
3354 cn->next->prev = cn->prev;
3356 if (cn == journal->j_first) {
3357 journal->j_first = cn->next;
3359 if (cn == journal->j_last) {
3360 journal->j_last = cn->prev;
3362 if (bh)
3363 remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
3364 bh->b_blocknr, 0);
3365 clear_buffer_journaled(bh); /* don't log this one */
3367 if (!already_cleaned) {
3368 clear_buffer_journal_dirty(bh);
3369 clear_buffer_dirty(bh);
3370 clear_buffer_journal_test(bh);
3371 put_bh(bh);
3372 if (atomic_read(&(bh->b_count)) < 0) {
3373 reiserfs_warning(p_s_sb,
3374 "journal-1752: remove from trans, b_count < 0");
3376 ret = 1;
3378 journal->j_len--;
3379 journal->j_len_alloc--;
3380 free_cnode(p_s_sb, cn);
3381 return ret;
3385 ** for any cnode in a journal list, it can only be dirtied if all the
3386 ** transactions that include it are committed to disk.
3387 ** this checks through each transaction, and returns 1 if you are allowed to dirty,
3388 ** and 0 if you aren't
3390 ** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3391 ** blocks for a given transaction on disk
3394 static int can_dirty(struct reiserfs_journal_cnode *cn)
3396 struct super_block *sb = cn->sb;
3397 b_blocknr_t blocknr = cn->blocknr;
3398 struct reiserfs_journal_cnode *cur = cn->hprev;
3399 int can_dirty = 1;
3401 /* first test hprev. These are all newer than cn, so any node here
3402 ** with the same block number and dev means this node can't be sent
3403 ** to disk right now.
3405 while (cur && can_dirty) {
3406 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3407 cur->blocknr == blocknr) {
3408 can_dirty = 0;
3410 cur = cur->hprev;
3412 /* then test hnext. These are all older than cn. As long as they
3413 ** are committed to the log, it is safe to write cn to disk
3415 cur = cn->hnext;
3416 while (cur && can_dirty) {
3417 if (cur->jlist && cur->jlist->j_len > 0 &&
3418 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3419 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3420 can_dirty = 0;
3422 cur = cur->hnext;
3424 return can_dirty;
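/* Editor's note (worked example, not in the original source): in the hash
** chain walked above, hprev entries are newer than cn and hnext entries are
** older. E.g. if a block was logged in transactions T5 and T7, T5's cnode
** cannot be dirtied while T7 still holds a live bh for the block, nor while
** an older list containing it still has commit work left; otherwise a stale
** copy could reach the real location ahead of the log.
*/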
3427 /* syncs the commit blocks, but does not force the real buffers to disk
3428 ** will wait until the current transaction is done/committed before returning
3430 int journal_end_sync(struct reiserfs_transaction_handle *th,
3431 struct super_block *p_s_sb, unsigned long nblocks)
3433 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3435 BUG_ON(!th->t_trans_id);
3436 /* you must not sync while nested; it would be very, very bad */
3437 BUG_ON(th->t_refcount > 1);
3438 if (journal->j_len == 0) {
3439 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3441 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3443 return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
3447 ** writeback the pending async commits to disk
3449 static void flush_async_commits(void *p)
3451 struct super_block *p_s_sb = p;
3452 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3453 struct reiserfs_journal_list *jl;
3454 struct list_head *entry;
3456 lock_kernel();
3457 if (!list_empty(&journal->j_journal_list)) {
3458 /* last entry is the youngest, commit it and you get everything */
3459 entry = journal->j_journal_list.prev;
3460 jl = JOURNAL_LIST_ENTRY(entry);
3461 flush_commit_list(p_s_sb, jl, 1);
3463 unlock_kernel();
3467 ** flushes any old transactions to disk
3468 ** ends the current transaction if it is too old
3470 int reiserfs_flush_old_commits(struct super_block *p_s_sb)
3472 time_t now;
3473 struct reiserfs_transaction_handle th;
3474 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3476 now = get_seconds();
3477 /* safety check so we don't flush while we are replaying the log during
3478 * mount
3480 if (list_empty(&journal->j_journal_list)) {
3481 return 0;
3484 /* check the current transaction. If there are no writers, and it is
3485 * too old, finish it, and force the commit blocks to disk
3487 if (atomic_read(&journal->j_wcount) <= 0 &&
3488 journal->j_trans_start_time > 0 &&
3489 journal->j_len > 0 &&
3490 (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3491 if (!journal_join(&th, p_s_sb, 1)) {
3492 reiserfs_prepare_for_journal(p_s_sb,
3493 SB_BUFFER_WITH_SB(p_s_sb),
3495 journal_mark_dirty(&th, p_s_sb,
3496 SB_BUFFER_WITH_SB(p_s_sb));
3498 /* we're only being called from kreiserfsd, so it makes no sense to do
3499 ** an async commit just so that kreiserfsd can do it later
3501 do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
3504 return p_s_sb->s_dirt;
3508 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
3510 ** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
3511 ** the writers are done. By the time it wakes up, the transaction it was called on has already ended, so it just
3512 ** flushes the commit list and returns 0.
3514 ** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
3516 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
3518 static int check_journal_end(struct reiserfs_transaction_handle *th,
3519 struct super_block *p_s_sb, unsigned long nblocks,
3520 int flags)
3523 time_t now;
3524 int flush = flags & FLUSH_ALL;
3525 int commit_now = flags & COMMIT_NOW;
3526 int wait_on_commit = flags & WAIT;
3527 struct reiserfs_journal_list *jl;
3528 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3530 BUG_ON(!th->t_trans_id);
3532 if (th->t_trans_id != journal->j_trans_id) {
3533 reiserfs_panic(th->t_super,
3534 "journal-1577: handle trans id %ld != current trans id %ld\n",
3535 th->t_trans_id, journal->j_trans_id);
3538 journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3539 if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
3540 atomic_dec(&(journal->j_wcount));
3543 /* BUG: deal with the case where j_len is 0 but blocks freed earlier still need to be released.
3544 ** That will be dealt with by the next transaction that actually writes something, but it should be
3545 ** taken care of in this trans
3547 BUG_ON(journal->j_len == 0);
3549 /* if wcount > 0, and we are called to with flush or commit_now,
3550 ** we wait on j_join_wait. We will wake up when the last writer has
3551 ** finished the transaction, and started it on its way to the disk.
3552 ** Then, we flush the commit or journal list, and just return 0
3553 ** because the rest of journal end was already done for this transaction.
3555 if (atomic_read(&(journal->j_wcount)) > 0) {
3556 if (flush || commit_now) {
3557 unsigned trans_id;
3559 jl = journal->j_current_jl;
3560 trans_id = jl->j_trans_id;
3561 if (wait_on_commit)
3562 jl->j_state |= LIST_COMMIT_PENDING;
3563 atomic_set(&(journal->j_jlock), 1);
3564 if (flush) {
3565 journal->j_next_full_flush = 1;
3567 unlock_journal(p_s_sb);
3569 /* sleep while the current transaction is still j_jlocked */
3570 while (journal->j_trans_id == trans_id) {
3571 if (atomic_read(&journal->j_jlock)) {
3572 queue_log_writer(p_s_sb);
3573 } else {
3574 lock_journal(p_s_sb);
3575 if (journal->j_trans_id == trans_id) {
3576 atomic_set(&(journal->j_jlock),
3579 unlock_journal(p_s_sb);
3582 BUG_ON(journal->j_trans_id == trans_id);
3584 if (commit_now
3585 && journal_list_still_alive(p_s_sb, trans_id)
3586 && wait_on_commit) {
3587 flush_commit_list(p_s_sb, jl, 1);
3589 return 0;
3591 unlock_journal(p_s_sb);
3592 return 0;
3595 /* deal with old transactions where we are the last writers */
3596 now = get_seconds();
3597 if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3598 commit_now = 1;
3599 journal->j_next_async_flush = 1;
3601 /* don't batch when someone is waiting on j_join_wait */
3602 /* don't batch when syncing the commit or flushing the whole trans */
3603 if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
3604 && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3605 && journal->j_len_alloc < journal->j_max_batch
3606 && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3607 journal->j_bcount++;
3608 unlock_journal(p_s_sb);
3609 return 0;
3612 if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
3613 reiserfs_panic(p_s_sb,
3614 "journal-003: journal_end: j_start (%ld) is too high\n",
3615 journal->j_start);
3617 return 1;
3621 ** Does all the work that makes deleting blocks safe.
3622 ** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
3624 ** otherwise:
3625 ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
3626 ** before this transaction has finished.
3628 ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
3629 ** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
3630 ** the block can't be reallocated yet.
3632 ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
3634 int journal_mark_freed(struct reiserfs_transaction_handle *th,
3635 struct super_block *p_s_sb, b_blocknr_t blocknr)
3637 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3638 struct reiserfs_journal_cnode *cn = NULL;
3639 struct buffer_head *bh = NULL;
3640 struct reiserfs_list_bitmap *jb = NULL;
3641 int cleaned = 0;
3642 BUG_ON(!th->t_trans_id);
3644 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3645 if (cn && cn->bh) {
3646 bh = cn->bh;
3647 get_bh(bh);
3649 /* if it is journal new, we just remove it from this transaction */
3650 if (bh && buffer_journal_new(bh)) {
3651 clear_buffer_journal_new(bh);
3652 clear_prepared_bits(bh);
3653 reiserfs_clean_and_file_buffer(bh);
3654 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3655 } else {
3656 /* set the bit for this block in the journal bitmap for this transaction */
3657 jb = journal->j_current_jl->j_list_bitmap;
3658 if (!jb) {
3659 reiserfs_panic(p_s_sb,
3660 "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
3662 set_bit_in_list_bitmap(p_s_sb, blocknr, jb);
3664 /* Note, the entire while loop is not allowed to schedule. */
3666 if (bh) {
3667 clear_prepared_bits(bh);
3668 reiserfs_clean_and_file_buffer(bh);
3670 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3672 /* find all older transactions with this block, make sure they don't try to write it out */
3673 cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
3674 blocknr);
3675 while (cn) {
3676 if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
3677 set_bit(BLOCK_FREED, &cn->state);
3678 if (cn->bh) {
3679 if (!cleaned) {
3680 /* remove_from_transaction will brelse the buffer if it was
3681 ** in the current trans
3683 clear_buffer_journal_dirty(cn->
3684 bh);
3685 clear_buffer_dirty(cn->bh);
3686 clear_buffer_journal_test(cn->
3687 bh);
3688 cleaned = 1;
3689 put_bh(cn->bh);
3690 if (atomic_read
3691 (&(cn->bh->b_count)) < 0) {
3692 reiserfs_warning(p_s_sb,
3693 "journal-2138: cn->bh->b_count < 0");
3696 if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
3697 atomic_dec(&
3698 (cn->jlist->
3699 j_nonzerolen));
3701 cn->bh = NULL;
3704 cn = cn->hnext;
3708 if (bh) {
3709 put_bh(bh); /* get_hash grabs the buffer */
3710 if (atomic_read(&(bh->b_count)) < 0) {
3711 reiserfs_warning(p_s_sb,
3712 "journal-2165: bh->b_count < 0");
3715 return 0;
3718 void reiserfs_update_inode_transaction(struct inode *inode)
3720 struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3721 REISERFS_I(inode)->i_jl = journal->j_current_jl;
3722 REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
3726 * returns -1 on error, 0 if no commits/barriers were done and 1
3727 * if a transaction was actually committed and the barrier was done
3729 static int __commit_trans_jl(struct inode *inode, unsigned long id,
3730 struct reiserfs_journal_list *jl)
3732 struct reiserfs_transaction_handle th;
3733 struct super_block *sb = inode->i_sb;
3734 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3735 int ret = 0;
3737 /* is it from the current transaction, or from an unknown transaction? */
3738 if (id == journal->j_trans_id) {
3739 jl = journal->j_current_jl;
3740 /* try to let other writers come in and grow this transaction */
3741 let_transaction_grow(sb, id);
3742 if (journal->j_trans_id != id) {
3743 goto flush_commit_only;
3746 ret = journal_begin(&th, sb, 1);
3747 if (ret)
3748 return ret;
3750 /* someone might have ended this transaction while we joined */
3751 if (journal->j_trans_id != id) {
3752 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3754 journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
3755 ret = journal_end(&th, sb, 1);
3756 goto flush_commit_only;
3759 ret = journal_end_sync(&th, sb, 1);
3760 if (!ret)
3761 ret = 1;
3763 } else {
3764 /* this gets tricky, we have to make sure the journal list in
3765 * the inode still exists. We know the list is still around
3766 * if we've got a larger transaction id than the oldest list
3768 flush_commit_only:
3769 if (journal_list_still_alive(inode->i_sb, id)) {
3771 * we only set ret to 1 when we know for sure
3772 * the barrier hasn't been started yet on the commit
3773 * block.
3775 if (atomic_read(&jl->j_commit_left) > 1)
3776 ret = 1;
3777 flush_commit_list(sb, jl, 1);
3778 if (journal->j_errno)
3779 ret = journal->j_errno;
3782 /* otherwise the list is gone, and long since committed */
3783 return ret;
int reiserfs_commit_for_inode(struct inode *inode)
{
	unsigned long id = REISERFS_I(inode)->i_trans_id;
	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

	/* for the whole inode, assume unset id means it was
	 * changed in the current transaction.  More conservative
	 */
	if (!id || !jl) {
		reiserfs_update_inode_transaction(inode);
		id = REISERFS_I(inode)->i_trans_id;
		/* jl will be updated in __commit_trans_jl */
	}

	return __commit_trans_jl(inode, id, jl);
}
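/*
** Undo reiserfs_prepare_for_journal: clear the prepared bit, and if
** the buffer was journal-dirty before it was prepared (and can still
** safely be dirtied), make it dirty again so the change isn't lost.
*/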
void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
				      struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	PROC_INFO_INC(p_s_sb, journal.restore_prepared);
	if (!bh) {
		return;
	}
	if (test_clear_buffer_journal_restore_dirty(bh) &&
	    buffer_journal_dirty(bh)) {
		struct reiserfs_journal_cnode *cn;
		cn = get_journal_hash_dev(p_s_sb,
					  journal->j_list_hash_table,
					  bh->b_blocknr);
		if (cn && can_dirty(cn)) {
			set_buffer_journal_test(bh);
			mark_buffer_dirty(bh);
		}
	}
	clear_buffer_journal_prepared(bh);
}
extern struct tree_balance *cur_tb;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it.  So, we must:
** clean it
** wait on it.
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
				 struct buffer_head *bh, int wait)
{
	PROC_INFO_INC(p_s_sb, journal.prepare);

	if (test_set_buffer_locked(bh)) {
		if (!wait)
			return 0;
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
		clear_buffer_journal_test(bh);
		set_buffer_journal_restore_dirty(bh);
	}
	unlock_buffer(bh);
	return 1;
}
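/*
** A minimal usage sketch of the prepare/restore pair (hypothetical
** caller, not code from this file):
**
**	if (reiserfs_prepare_for_journal(sb, bh, 1)) {
**		... modify bh->b_data ...
**		journal_mark_dirty(th, sb, bh);
**	}
**
** A change that is abandoned instead calls
** reiserfs_restore_prepared_buffer(sb, bh) to undo the preparation.
*/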
static void flush_old_journal_lists(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *jl;
	struct list_head *entry;
	time_t now = get_seconds();

	while (!list_empty(&journal->j_journal_list)) {
		entry = journal->j_journal_list.next;
		jl = JOURNAL_LIST_ENTRY(entry);
		/* this check should always be run, to send old lists to disk */
		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
		    atomic_read(&jl->j_commit_left) == 0 &&
		    test_transaction(s, jl)) {
			flush_used_journal_lists(s, jl);
		} else {
			break;
		}
	}
}
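/*
** j_journal_list is kept in commit order (do_journal_end adds each new
** list with list_add_tail), which is why the loop above can stop at
** the first entry that is still too young or still committing --
** everything after it is newer.
*/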
/*
** long and ugly.  If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up.  Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th,
			  struct super_block *p_s_sb, unsigned long nblocks,
			  int flags)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
	struct reiserfs_journal_cnode *last_cn = NULL;
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;	/* commit bh */
	struct buffer_head *d_bh;	/* desc bh */
	int cur_write_start = 0;	/* start index of current log write */
	int old_start;
	int i;
	int flush;
	int wait_on_commit;
	struct reiserfs_journal_list *jl, *temp_jl;
	struct list_head *entry, *safe;
	unsigned long jindex;
	unsigned long commit_trans_id;
	int trans_half;

	BUG_ON(th->t_refcount > 1);
	BUG_ON(!th->t_trans_id);
	/* protect flush_older_commits from doing mistakes if the
	   transaction ID counter gets overflowed.  */
	if (th->t_trans_id == ~0UL)
		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
	flush = flags & FLUSH_ALL;
	wait_on_commit = flags & WAIT;

	put_fs_excl();
	current->journal_info = th->t_handle_save;
	reiserfs_check_lock_depth(p_s_sb, "journal end");
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
					     1);
		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
	}

	lock_journal(p_s_sb);
	if (journal->j_next_full_flush) {
		flags |= FLUSH_ALL;
		flush = 1;
	}
	if (journal->j_next_async_flush) {
		flags |= COMMIT_NOW | WAIT;
		wait_on_commit = 1;
	}

	/* check_journal_end locks the journal, and unlocks if it does not return 1
	** it tells us if we should continue with the journal_end, or just return
	*/
	if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
		p_s_sb->s_dirt = 1;
		wake_queued_writers(p_s_sb);
		reiserfs_async_progress_wait(p_s_sb);
		goto out;
	}

	/* check_journal_end might set these, check again */
	if (journal->j_next_full_flush) {
		flush = 1;
	}
	/*
	** j must wait means we have to flush the log blocks, and the real blocks for
	** this transaction
	*/
	if (journal->j_must_wait > 0) {
		flush = 1;
	}
#ifdef REISERFS_PREALLOCATE
	/* quota ops might need to nest, setup the journal_info pointer for them
	 * and raise the refcount so that it is > 0. */
	current->journal_info = th;
	th->t_refcount++;
	reiserfs_discard_all_prealloc(th);	/* it should not involve new blocks into
						 * the transaction */
	th->t_refcount--;
	current->journal_info = th->t_handle_save;
#endif
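	/*
	** On-disk layout of the transaction assembled below (offsets are
	** relative to SB_ONDISK_JOURNAL_1st_BLOCK and wrap modulo
	** SB_ONDISK_JOURNAL_SIZE):
	**
	**	j_start				desc block (d_bh)
	**	j_start + 1 .. j_start + j_len	copies of the logged real blocks
	**	j_start + j_len + 1		commit block (c_bh)
	*/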
	/* setup description block */
	d_bh =
	    journal_getblk(p_s_sb,
			   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			   journal->j_start);
	set_buffer_uptodate(d_bh);
	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
	memset(d_bh->b_data, 0, d_bh->b_size);
	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
	set_desc_trans_id(desc, journal->j_trans_id);

	/* setup commit block.  Don't write (keep it clean too) this one
	** until after everyone else is written */
	c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			      ((journal->j_start + journal->j_len +
				1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	memset(c_bh->b_data, 0, c_bh->b_size);
	set_commit_trans_id(commit, journal->j_trans_id);
	set_buffer_uptodate(c_bh);

	/* init this journal list */
	jl = journal->j_current_jl;

	/* we lock the commit before doing anything because
	 * we want to make sure nobody tries to run flush_commit_list until
	 * the new transaction is fully setup, and we've already flushed the
	 * ordered bh list
	 */
	down(&jl->j_commit_lock);

	/* save the transaction id in case we need to commit it later */
	commit_trans_id = jl->j_trans_id;

	atomic_set(&jl->j_older_commits_done, 0);
	jl->j_trans_id = journal->j_trans_id;
	jl->j_timestamp = journal->j_trans_start_time;
	jl->j_commit_bh = c_bh;
	jl->j_start = journal->j_start;
	jl->j_len = journal->j_len;
	atomic_set(&jl->j_nonzerolen, journal->j_len);
	atomic_set(&jl->j_commit_left, journal->j_len + 2);
	jl->j_realblock = NULL;
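	/*
	** The "+ 2" above makes j_commit_left count every block whose
	** write the commit must wait on: the desc block, the j_len log
	** copies, and the commit block itself.  flush_commit_list
	** decrements it as those writes complete.
	*/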
	/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
	** for each real block, add it to the journal list hash,
	** copy into real block index array in the commit or desc block
	*/
	trans_half = journal_trans_half(p_s_sb->s_blocksize);
	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
		if (buffer_journaled(cn->bh)) {
			jl_cn = get_cnode(p_s_sb);
			if (!jl_cn) {
				reiserfs_panic(p_s_sb,
					       "journal-1676, get_cnode returned NULL\n");
			}
			if (i == 0) {
				jl->j_realblock = jl_cn;
			}
			jl_cn->prev = last_cn;
			jl_cn->next = NULL;
			if (last_cn) {
				last_cn->next = jl_cn;
			}
			last_cn = jl_cn;
			/* make sure the block we are trying to log is not a block
			   of journal or reserved area */
			if (is_block_in_log_or_reserved_area
			    (p_s_sb, cn->bh->b_blocknr)) {
				reiserfs_panic(p_s_sb,
					       "journal-2332: Trying to log block %lu, which is a log block\n",
					       cn->bh->b_blocknr);
			}
			jl_cn->blocknr = cn->bh->b_blocknr;
			jl_cn->state = 0;
			jl_cn->sb = p_s_sb;
			jl_cn->bh = cn->bh;
			jl_cn->jlist = jl;
			insert_journal_hash(journal->j_list_hash_table, jl_cn);
			if (i < trans_half) {
				desc->j_realblock[i] =
				    cpu_to_le32(cn->bh->b_blocknr);
			} else {
				commit->j_realblock[i - trans_half] =
				    cpu_to_le32(cn->bh->b_blocknr);
			}
		} else {
			i--;
		}
	}
	set_desc_trans_len(desc, journal->j_len);
	set_desc_mount_id(desc, journal->j_mount_id);
	set_desc_trans_id(desc, journal->j_trans_id);
	set_commit_trans_len(commit, journal->j_len);

	/* special check in case all buffers in the journal were marked for not logging */
	BUG_ON(journal->j_len == 0);
	/* we're about to dirty all the log blocks, mark the description block
	 * dirty now too.  Don't mark the commit block dirty until all the
	 * others are on disk
	 */
	mark_buffer_dirty(d_bh);

	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
	cur_write_start = journal->j_start;
	cn = journal->j_first;
	jindex = 1;		/* start at one so we don't get the desc again */
	while (cn) {
		clear_buffer_journal_new(cn->bh);
		/* copy all the real blocks into log area.  dirty log blocks */
		if (buffer_journaled(cn->bh)) {
			struct buffer_head *tmp_bh;
			char *addr;
			struct page *page;
			tmp_bh =
			    journal_getblk(p_s_sb,
					   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
					   ((cur_write_start +
					     jindex) %
					    SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
			set_buffer_uptodate(tmp_bh);
			page = cn->bh->b_page;
			addr = kmap(page);
			memcpy(tmp_bh->b_data,
			       addr + offset_in_page(cn->bh->b_data),
			       cn->bh->b_size);
			kunmap(page);
			mark_buffer_dirty(tmp_bh);
			jindex++;
			set_buffer_journal_dirty(cn->bh);
			clear_buffer_journaled(cn->bh);
		} else {
			/* JDirty cleared sometime during transaction.  don't log this one */
			reiserfs_warning(p_s_sb,
					 "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
			brelse(cn->bh);
		}
		next = cn->next;
		free_cnode(p_s_sb, cn);
		cn = next;
		cond_resched();
	}
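	/*
	** Note on the kmap/kunmap pair above: cn->bh may belong to a
	** highmem page, so it must be temporarily mapped before its
	** contents can be memcpy'd into the log block.
	*/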
	/* we are done with both the c_bh and d_bh, but
	** c_bh must be written after all other commit blocks,
	** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
	*/

	journal->j_current_jl = alloc_journal_list(p_s_sb);

	/* now it is safe to insert this transaction on the main list */
	list_add_tail(&jl->j_list, &journal->j_journal_list);
	list_add_tail(&jl->j_working_list, &journal->j_working_list);
	journal->j_num_work_lists++;

	/* reset journal values for the next transaction */
	old_start = journal->j_start;
	journal->j_start =
	    (journal->j_start + journal->j_len +
	     2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
	atomic_set(&(journal->j_wcount), 0);
	journal->j_bcount = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	journal->j_len = 0;
	journal->j_trans_start_time = 0;
	/* check for trans_id overflow */
	if (++journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	journal->j_current_jl->j_trans_id = journal->j_trans_id;
	journal->j_must_wait = 0;
	journal->j_len_alloc = 0;
	journal->j_next_full_flush = 0;
	journal->j_next_async_flush = 0;
	init_journal_hash(p_s_sb);

	/* make sure reiserfs_add_jh sees the new current_jl before we
	 * write out the tails */
	smp_mb();
	/* tail conversion targets have to hit the disk before we end the
	 * transaction.  Otherwise a later transaction might repack the tail
	 * before this transaction commits, leaving the data block unflushed and
	 * clean; if we crash before the later transaction commits, the data
	 * block is lost.
	 */
	if (!list_empty(&jl->j_tail_bh_list)) {
		unlock_kernel();
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_tail_bh_list);
		lock_kernel();
	}
	BUG_ON(!list_empty(&jl->j_tail_bh_list));
	up(&jl->j_commit_lock);

	/* honor the flush wishes from the caller, simple commits can
	** be done outside the journal lock, they are done below
	**
	** if we don't flush the commit list right now, we put it into
	** the work queue so the people waiting on the async progress work
	** queue don't wait for this proc to flush journal lists and such.
	*/
	if (flush) {
		flush_commit_list(p_s_sb, jl, 1);
		flush_journal_list(p_s_sb, jl, 1);
	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
	/* if the next transaction has any chance of wrapping, flush
	** transactions that might get overwritten.  If any journal lists are very
	** old flush them as well.
	*/
      first_jl:
	list_for_each_safe(entry, safe, &journal->j_journal_list) {
		temp_jl = JOURNAL_LIST_ENTRY(entry);
		if (journal->j_start <= temp_jl->j_start) {
			if ((journal->j_start + journal->j_trans_max + 1) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(p_s_sb, temp_jl);
				goto first_jl;
			} else if ((journal->j_start +
				    journal->j_trans_max + 1) <
				   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
				/* if we don't cross into the next transaction and we don't
				 * wrap, there is no way we can overlap any later transactions
				 * break now
				 */
				break;
			}
		} else if ((journal->j_start +
			    journal->j_trans_max + 1) >
			   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
			if (((journal->j_start + journal->j_trans_max + 1) %
			     SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(p_s_sb, temp_jl);
				goto first_jl;
			} else {
				/* we don't overlap anything from our start to the end of the
				 * log, and our wrapped portion doesn't overlap anything at
				 * the start of the log.  We can break
				 */
				break;
			}
		}
	}
	flush_old_journal_lists(p_s_sb);
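	/*
	** Worked example of the wrap check above (the numbers are
	** hypothetical): with SB_ONDISK_JOURNAL_SIZE == 8192,
	** j_start == 8000 and j_trans_max == 1024, the next transaction
	** may occupy log blocks 8000..8191 plus the wrapped range
	** 0..833 (= (8000 + 1024 + 1) % 8192).  Any journal list whose
	** j_start falls inside either range is flushed before its log
	** blocks can be overwritten.
	*/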
	journal->j_current_jl->j_list_bitmap =
	    get_list_bitmap(p_s_sb, journal->j_current_jl);

	if (!(journal->j_current_jl->j_list_bitmap)) {
		reiserfs_panic(p_s_sb,
			       "journal-1996: do_journal_end, could not get a list bitmap\n");
	}

	atomic_set(&(journal->j_jlock), 0);
	unlock_journal(p_s_sb);
	/* wake up anybody waiting to join. */
	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
	wake_up(&(journal->j_join_wait));

	if (!flush && wait_on_commit &&
	    journal_list_still_alive(p_s_sb, commit_trans_id)) {
		flush_commit_list(p_s_sb, jl, 1);
	}
      out:
	reiserfs_check_lock_depth(p_s_sb, "journal end2");

	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are.  We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = p_s_sb;

	return journal->j_errno;
}
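/*
** Journal abort handling.  The "hard" variant below unconditionally
** marks the journal aborted and forces the filesystem read-only; the
** "soft" variant records the first errno seen and then performs the
** hard abort.  reiserfs_journal_abort is the public entry point.
*/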
static void __reiserfs_journal_abort_hard(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
	       reiserfs_bdevname(sb));

	sb->s_flags |= MS_RDONLY;
	set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
	dump_stack();
#endif
}

static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	__reiserfs_journal_abort_hard(sb);
}

void reiserfs_journal_abort(struct super_block *sb, int errno)
{
	__reiserfs_journal_abort_soft(sb, errno);
}