/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  Clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                      -- Note, if you call this as an immediate flush from
**                         within kupdate, it will ignore the immediate flag
*/
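
/*
 * Illustrative only -- a minimal sketch of how callers elsewhere in
 * reiserfs drive the handles described above.  The block estimate of 10
 * is an arbitrary example value, not taken from this file:
 *
 *	struct reiserfs_transaction_handle th;
 *	int err;
 *
 *	err = journal_begin(&th, sb, 10);
 *	if (err)
 *		return err;
 *	reiserfs_prepare_for_journal(sb, bh, 1);
 *	...modify bh...
 *	journal_mark_dirty(&th, sb, bh);
 *	err = journal_end(&th, sb, 10);
 */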
#include <linux/time.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/uaccess.h>

#include <asm/system.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))
/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;
#define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
				   structs at 4k */
#define BUFNR 64		/* read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */
#define BLOCK_FREED 2		/* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1		/* flush commit and real blocks */
#define COMMIT_NOW  2		/* end and commit this transaction */
#define WAIT        4		/* wait for the log blocks to hit the disk */
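
/*
 * Sanity check on JOURNAL_TRANS_HALF (a worked example assuming 4k
 * blocks): each of the description and commit blocks stores an array of
 * 32-bit real block numbers, so 1018 entries occupy 1018 * 4 = 4072
 * bytes, leaving 24 bytes of the 4096-byte block for the fixed header
 * fields.  Other blocksizes go through journal_trans_half() instead.
 */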
static int do_journal_end(struct reiserfs_transaction_handle *,
			  struct super_block *, unsigned long nblocks,
			  int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
			       struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb,
			      unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}
/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because
** I can't allow refile_buffer to make schedule happen after I've freed a
** block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}
static void disable_barrier(struct super_block *s)
{
	REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
	printk("reiserfs: disabling flush barriers on %s\n",
	       reiserfs_bdevname(s));
}
static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
							 *sb)
{
	struct reiserfs_bitmap_node *bn;

	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
	if (!bn) {
		return NULL;
	}
	bn->data = kzalloc(sb->s_blocksize, GFP_NOFS);
	if (!bn->data) {
		kfree(bn);
		return NULL;
	}
	INIT_LIST_HEAD(&bn->list);
	return bn;
}
static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
      again:

	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	}
	bn = allocate_bitmap_node(sb);
	if (!bn) {
		yield();
		goto again;
	}
	return bn;
}
static inline void free_bitmap_node(struct super_block *sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		kfree(bn->data);
		kfree(bn);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}
static void allocate_bitmap_nodes(struct super_block *sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			break;	/* this is ok, we'll try again when more are needed */
		}
	}
}
static int set_bit_in_list_bitmap(struct super_block *sb,
				  b_blocknr_t block,
				  struct reiserfs_list_bitmap *jb)
{
	unsigned int bmap_nr = block / (sb->s_blocksize << 3);
	unsigned int bit_nr = block % (sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}
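
/*
 * Worked example (illustrative): with 4k blocks, s_blocksize << 3 is
 * 32768 bits per list bitmap node, so block 100000 maps to
 * bmap_nr = 100000 / 32768 = 3 and bit_nr = 100000 % 32768 = 1696.
 */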
static void cleanup_bitmap_list(struct super_block *sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < reiserfs_bmap_count(sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}
/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}
static int free_bitmap_nodes(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}
/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *sb,
				   struct reiserfs_list_bitmap *jb_array,
				   unsigned int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vmalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(sb, "clm-2000", "unable to "
					 "allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
		memset(jb->bitmaps, 0, mem);
	}
	if (failed) {
		free_list_bitmaps(sb, jb_array);
		return -1;
	}
	return 0;
}
/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	if (jb->journal_list) {	/* double check to make sure it flushed correctly */
		return NULL;
	}
	jb->journal_list = jl;
	return jb;
}
/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* if last one, overwrite it after the if */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}
/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}
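
/*
 * Aside: a minimal, self-contained sketch of the free-list scheme used by
 * allocate_cnodes/get_cnode/free_cnode above -- preallocate one array,
 * thread it into a doubly linked list, then pop and push in O(1).  The
 * names (struct node, pool_get, pool_put, free_list) are illustrative
 * only; they are not part of reiserfs:
 *
 *	struct node { struct node *next, *prev; };
 *	static struct node *free_list;
 *
 *	static struct node *pool_get(void)
 *	{
 *		struct node *n = free_list;
 *		if (!n)
 *			return NULL;
 *		free_list = n->next;
 *		if (free_list)
 *			free_list->prev = NULL;
 *		return n;
 *	}
 *
 *	static void pool_put(struct node *n)
 *	{
 *		n->next = free_list;
 *		n->prev = NULL;
 *		if (free_list)
 *			free_list->prev = n;
 *		free_list = n;
 *	}
 */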
static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}
/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
#ifdef CONFIG_SMP
	if (current->lock_depth < 0) {
		reiserfs_panic(sb, "journal-1", "%s called without kernel "
			       "lock held", caller);
	}
#else
	;
#endif
}
/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *
get_journal_hash_dev(struct super_block *sb,
		     struct reiserfs_journal_cnode **table, long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return (struct reiserfs_journal_cnode *)0;
}
/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return
** that through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
*/
int reiserfs_in_journal(struct super_block *sb,
			unsigned int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
	 ** if we crash before the transaction that freed it commits, this transaction won't
	 ** have committed either, and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}
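
/*
 * Sketch of the caller's side of the next_zero_bit contract described
 * above (illustrative only -- the real consumer is find_forward in the
 * bitmap allocator):
 *
 *	b_blocknr_t next;
 *	while (bit_nr < last_bit) {
 *		if (!reiserfs_in_journal(sb, bmap_nr, bit_nr, 1, &next))
 *			return bit_nr;			(block is reusable)
 *		bit_nr = next ? next : bit_nr + 1;	(skip ahead)
 *	}
 */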
/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}
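
/*
 * Note: journal_hash() yields an lvalue for the hash bucket, which is why
 * the assignment above can write straight through it.  Conceptually (the
 * macro shown here is illustrative, not the real definition):
 *
 *	#define journal_hash(table, sb, block) \
 *		((table)[hash_fn((sb), (block)) % JOURNAL_HASH_SIZE])
 *
 * insert_journal_hash then pushes cn onto the front of that bucket's
 * doubly linked hnext/hprev chain.
 */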
/* lock the current transaction */
static inline void lock_journal(struct super_block *sb)
{
	PROC_INFO_INC(sb, journal.lock_journal);
	mutex_lock(&SB_JOURNAL(sb)->j_mutex);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *sb)
{
	mutex_unlock(&SB_JOURNAL(sb)->j_mutex);
}
static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}
/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *sb,
					   struct reiserfs_journal_list *jl)
{
	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}
static int journal_list_still_alive(struct super_block *s,
				    unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}
/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason.  Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh.  If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final page_cache_release drop the page from the lru.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	if (!page->mapping && trylock_page(page)) {
		page_cache_get(page);
		put_bh(bh);
		if (!page->mapping)
			try_to_free_buffers(page);
		unlock_page(page);
		page_cache_release(page);
	} else {
		put_bh(bh);
	}
}
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL, "clm-2084",
				 "pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	release_buffer_page(bh);
}
static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}
static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}
static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}
static int submit_barrier_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	return submit_bh(WRITE_BARRIER, bh);
}
static void check_barrier_completion(struct super_block *s,
				     struct buffer_head *bh)
{
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		disable_barrier(s);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}
}
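
/*
 * Caller pattern for the barrier helpers above (an illustrative sketch;
 * it mirrors how _update_journal_header_block uses them later in this
 * file).  When the device rejects WRITE_BARRIER, barriers are disabled
 * for the mount and the code falls back to a plain synchronous write:
 *
 *	if (reiserfs_barrier_flush(s)) {
 *		lock_buffer(bh);
 *		if (submit_barrier_buffer(bh) == -EOPNOTSUPP) {
 *			set_buffer_uptodate(bh);
 *			disable_barrier(s);
 *			goto plain_write;
 *		}
 *		wait_on_buffer(bh);
 *		check_barrier_completion(s, bh);
 *	} else {
 *	      plain_write:
 *		set_buffer_dirty(bh);
 *		sync_dirty_buffer(bh);
 *	}
 */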
#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
}
static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
}
static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	BUG_ON(chunk->nr >= CHUNK_SIZE);
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock)
			spin_unlock(lock);
		fn(chunk);
		if (lock)
			spin_lock(lock);
	}
	return ret;
}
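
/*
 * Usage sketch for the chunk helpers (illustrative): callers accumulate
 * dirty buffers, and add_to_chunk submits a full chunk via the callback,
 * dropping `lock' around the submission so no block-layer call ever
 * happens under a spinlock:
 *
 *	struct buffer_chunk chunk;
 *	chunk.nr = 0;
 *	spin_lock(lock);
 *	while ((bh = next_buffer()))	(pseudo-iterator, not a real API)
 *		add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
 *	spin_unlock(lock);
 *	if (chunk.nr)
 *		write_ordered_chunk(&chunk);
 */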
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}
/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}
static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
	      no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/* buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		BUG_ON(bh->b_private);
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}
int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}

int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}
#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t * lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			if (!buffer_dirty(bh)) {
				list_move(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		/* in theory, dirty non-uptodate buffers should never get here,
		 * but the upper layer io error paths still have a few quirks.
		 * Handle them here as gracefully as we can
		 */
		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
			clear_buffer_dirty(bh);
			ret = -EIO;
		}
		if (buffer_dirty(bh)) {
			list_move(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
	      loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);

		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		/* ugly interaction with invalidatepage here.
		 * reiserfs_invalidate_page will pin any buffer that has a valid
		 * journal head from an older transaction.  If someone else sets
		 * our buffer dirty after we write it in the first loop, and
		 * then someone truncates the page away, nobody will ever write
		 * the buffer. We're safe if we write the page one last time
		 * after freeing the journal header.
		 */
		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
			spin_unlock(lock);
			ll_rw_block(WRITE, 1, &bh);
			spin_lock(lock);
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}
static int flush_older_commits(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal_list *first_jl;
	struct list_head *entry;
	unsigned int trans_id = jl->j_trans_id;
	unsigned int other_trans_id;
	unsigned int first_trans_id;

      find_first:
	/*
	 * first we walk backwards to find the oldest uncommitted transaction
	 */
	first_jl = jl;
	entry = jl->j_list.prev;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		if (entry == &journal->j_journal_list ||
		    atomic_read(&other_jl->j_older_commits_done))
			break;
		first_jl = other_jl;
		entry = other_jl->j_list.prev;
	}

	/* if we didn't find any older uncommitted transactions, return now */
	if (first_jl == jl) {
		return 0;
	}

	first_trans_id = first_jl->j_trans_id;

	entry = &first_jl->j_list;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		other_trans_id = other_jl->j_trans_id;

		if (other_trans_id < trans_id) {
			if (atomic_read(&other_jl->j_commit_left) != 0) {
				flush_commit_list(s, other_jl, 0);

				/* list we were called with is gone, return */
				if (!journal_list_still_alive(s, trans_id))
					return 1;

				/* the one we just flushed is gone, this means all
				 * older lists are also gone, so first_jl is no longer
				 * valid either.  Go back to the beginning.
				 */
				if (!journal_list_still_alive
				    (s, other_trans_id)) {
					goto find_first;
				}
			}
			entry = entry->next;
			if (entry == &journal->j_journal_list)
				return 0;
		} else {
			return 0;
		}
	}
	return 0;
}
static int reiserfs_async_progress_wait(struct super_block *s)
{
	DEFINE_WAIT(wait);
	struct reiserfs_journal *j = SB_JOURNAL(s);
	if (atomic_read(&j->j_async_throttle))
		congestion_wait(WRITE, HZ / 10);
	return 0;
}
/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall)
{
	int i;
	b_blocknr_t bn;
	struct buffer_head *tbh = NULL;
	unsigned int trans_id = jl->j_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int barrier = 0;
	int retval = 0;
	int write_len;

	reiserfs_check_lock_depth(s, "flush_commit_list");

	if (atomic_read(&jl->j_older_commits_done)) {
		return 0;
	}

	/* before we can put our commit blocks on disk, we have to make sure everyone older than
	 ** us is on disk too
	 */
	BUG_ON(jl->j_len <= 0);
	BUG_ON(trans_id == journal->j_trans_id);

	get_journal_list(jl);
	if (flushall) {
		if (flush_older_commits(s, jl) == 1) {
			/* list disappeared during flush_older_commits.  return */
			goto put_jl;
		}
	}

	/* make sure nobody is trying to flush this one at the same time */
	mutex_lock(&jl->j_commit_mutex);
	if (!journal_list_still_alive(s, trans_id)) {
		mutex_unlock(&jl->j_commit_mutex);
		goto put_jl;
	}
	BUG_ON(jl->j_trans_id == 0);

	/* this commit is done, exit */
	if (atomic_read(&(jl->j_commit_left)) <= 0) {
		if (flushall) {
			atomic_set(&(jl->j_older_commits_done), 1);
		}
		mutex_unlock(&jl->j_commit_mutex);
		goto put_jl;
	}

	if (!list_empty(&jl->j_bh_list)) {
		int ret;
		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
					    journal, jl, &jl->j_bh_list);
		if (ret < 0 && retval == 0)
			retval = ret;
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk.  Try to write at least 256
	 * log blocks. later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0; i < write_len; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh))
				ll_rw_block(WRITE, 1, &tbh);
			put_bh(tbh);
		}
	}
	atomic_dec(&journal->j_async_throttle);

	/* wait on everything written so far before writing the commit
	 * if we are in barrier mode, send the commit down now
	 */
	barrier = reiserfs_barrier_flush(s);

	/* We're skipping the commit if there's an error */
	if (retval || reiserfs_is_journal_aborted(journal))
		barrier = 0;

	if (barrier) {
		int ret;
		lock_buffer(jl->j_commit_bh);
		ret = submit_barrier_buffer(jl->j_commit_bh);
		if (ret == -EOPNOTSUPP) {
			set_buffer_uptodate(jl->j_commit_bh);
			disable_barrier(s);
			barrier = 0;
		}
	}
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		wait_on_buffer(tbh);
		/* since we're using ll_rw_blk above, it might have skipped
		 * over a locked buffer.  Double check here
		 */
		if (buffer_dirty(tbh))	/* redundant, sync_dirty_buffer() checks */
			sync_dirty_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601",
					 "buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	if (!barrier) {
		/* If there was a write error in the journal - we can't commit
		 * this transaction - it will be invalid and, if successful,
		 * will just end up propagating the write error out to
		 * the file system. */
		if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
			if (buffer_dirty(jl->j_commit_bh))
				BUG();
			mark_buffer_dirty(jl->j_commit_bh);
			sync_dirty_buffer(jl->j_commit_bh);
		}
	} else
		wait_on_buffer(jl->j_commit_bh);

	check_barrier_completion(s, jl->j_commit_bh);

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615", "buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	mutex_unlock(&jl->j_commit_mutex);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __func__);
	return retval;
}
/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *
find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}
static int newer_jl_done(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
		    atomic_read(&cn->jlist->j_commit_left) != 0)
			return 0;
		cn = cn->hprev;
	}
	return 1;
}
static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);
/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	 ** to lock for each call to remove_journal_hash?
	 */
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(sb, "reiserfs-2201",
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ? 1 : 0,
						 cn->state);
			}
			cn->state = 0;
			remove_journal_hash(sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(sb, last);
	}
	jl->j_realblock = NULL;
}
/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *sb,
					unsigned long offset,
					unsigned int trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			wait_on_buffer((journal->j_header_bh));
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(sb, "journal-699",
						 "buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		if (reiserfs_barrier_flush(sb)) {
			int ret;
			lock_buffer(journal->j_header_bh);
			ret = submit_barrier_buffer(journal->j_header_bh);
			if (ret == -EOPNOTSUPP) {
				set_buffer_uptodate(journal->j_header_bh);
				disable_barrier(sb);
				goto sync;
			}
			wait_on_buffer(journal->j_header_bh);
			check_barrier_completion(sb, journal->j_header_bh);
		} else {
		      sync:
			set_buffer_dirty(journal->j_header_bh);
			sync_dirty_buffer(journal->j_header_bh);
		}
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(sb, "journal-837",
					 "IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}

static int update_journal_header_block(struct super_block *sb,
				       unsigned long offset,
				       unsigned int trans_id)
{
	return _update_journal_header_block(sb, offset, trans_id);
}
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned int trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}
static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}
/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s, "clm-2048", "called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		mutex_lock(&journal->j_flush_mutex);
	} else if (mutex_trylock(&journal->j_flush_mutex)) {
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	 ** the commit lists of any older transactions
	 */
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s, "journal-844", "journal list is flushing, "
			       "wcount is not 0");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit. Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		 ** don't clear BH_JDirty_wait if we aren't the one writing this
		 ** block to disk
		 */
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			 ** we are working with it
			 */
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		 ** sure they are committed, and don't try writing it to disk
		 */
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		 ** locked while it works, so we should never see a buffer here that
		 ** is not marked JDirty_wait
		 */
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s, "journal-813",
					 "BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer transaction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s, "clm-2082",
					 "Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __func__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s, "journal-945",
						 "saved_bh->b_count < 0");
			}
		}
	}
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s, "journal-1011",
						       "cn->bh is NULL");
				}
				wait_on_buffer(cn->bh);
				if (!cn->bh) {
					reiserfs_panic(s, "journal-1012",
						       "cn->bh is NULL");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s, "journal-949",
							 "buffer write failed");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				 ** check, otherwise we race against our flushpage routine
				 */
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* drop one ref for us */
				put_bh(cn->bh);
				/* drop one ref for journal_mark_dirty */
				release_buffer_page(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __func__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk.  This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
	 */
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure there
	 ** are no allocations going on while we are flushing journal lists.  So,
	 ** we only need to update the journal header block for the last list
	 ** in the flush
	 */
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __func__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		mutex_unlock(&journal->j_flush_mutex);
	return err;
}
*s
,
1648 struct reiserfs_journal_list
*jl
)
1650 struct reiserfs_journal_cnode
*cn
;
1652 if (jl
->j_len
== 0 || atomic_read(&jl
->j_nonzerolen
) == 0)
1655 cn
= jl
->j_realblock
;
1657 /* if the blocknr == 0, this has been cleared from the hash,
1660 if (cn
->blocknr
== 0) {
1663 if (cn
->bh
&& !newer_jl_done(cn
))
1672 static int write_one_transaction(struct super_block
*s
,
1673 struct reiserfs_journal_list
*jl
,
1674 struct buffer_chunk
*chunk
)
1676 struct reiserfs_journal_cnode
*cn
;
1679 jl
->j_state
|= LIST_TOUCHED
;
1680 del_from_work_list(s
, jl
);
1681 if (jl
->j_len
== 0 || atomic_read(&jl
->j_nonzerolen
) == 0) {
1685 cn
= jl
->j_realblock
;
1687 /* if the blocknr == 0, this has been cleared from the hash,
1690 if (cn
->blocknr
== 0) {
1693 if (cn
->bh
&& can_dirty(cn
) && buffer_dirty(cn
->bh
)) {
1694 struct buffer_head
*tmp_bh
;
1695 /* we can race against journal_mark_freed when we try
1696 * to lock_buffer(cn->bh), so we have to inc the buffer
1697 * count, and recheck things after locking
1701 lock_buffer(tmp_bh
);
1702 if (cn
->bh
&& can_dirty(cn
) && buffer_dirty(tmp_bh
)) {
1703 if (!buffer_journal_dirty(tmp_bh
) ||
1704 buffer_journal_prepared(tmp_bh
))
1706 add_to_chunk(chunk
, tmp_bh
, NULL
, write_chunk
);
1709 /* note, cn->bh might be null now */
1710 unlock_buffer(tmp_bh
);
1721 /* used by flush_commit_list */
1722 static int dirty_one_transaction(struct super_block
*s
,
1723 struct reiserfs_journal_list
*jl
)
1725 struct reiserfs_journal_cnode
*cn
;
1726 struct reiserfs_journal_list
*pjl
;
1729 jl
->j_state
|= LIST_DIRTY
;
1730 cn
= jl
->j_realblock
;
1732 /* look for a more recent transaction that logged this
1733 ** buffer. Only the most recent transaction with a buffer in
1734 ** it is allowed to send that buffer to disk
1736 pjl
= find_newer_jl_for_cn(cn
);
1737 if (!pjl
&& cn
->blocknr
&& cn
->bh
1738 && buffer_journal_dirty(cn
->bh
)) {
1739 BUG_ON(!can_dirty(cn
));
1740 /* if the buffer is prepared, it will either be logged
1741 * or restored. If restored, we need to make sure
1742 * it actually gets marked dirty
1744 clear_buffer_journal_new(cn
->bh
);
1745 if (buffer_journal_prepared(cn
->bh
)) {
1746 set_buffer_journal_restore_dirty(cn
->bh
);
1748 set_buffer_journal_test(cn
->bh
);
1749 mark_buffer_dirty(cn
->bh
);
static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned int *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned int orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	mutex_lock(&journal->j_flush_mutex);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_mutex held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left)
		    || !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	if (chunk.nr) {
		write_chunk(&chunk);
	}

      done:
	mutex_unlock(&journal->j_flush_mutex);
	return ret;
}
/* for o_sync and fsync heavy applications, they tend to use
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned int trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	/* try to find a group of blocks we can flush across all the
	 ** transactions, but only bother if we've actually spanned
	 ** across multiple lists
	 */
	if (flush_jl != jl) {
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	return 0;
}
/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
			 struct reiserfs_journal_cnode **table,
			 struct reiserfs_journal_list *jl,
			 unsigned long block, int remove_freed)
{
	struct reiserfs_journal_cnode *cur;
	struct reiserfs_journal_cnode **head;

	head = &(journal_hash(table, sb, block));
	if (!head) {
		return;
	}
	cur = *head;
	while (cur) {
		if (cur->blocknr == block && cur->sb == sb
		    && (jl == NULL || jl == cur->jlist)
		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
			if (cur->hnext) {
				cur->hnext->hprev = cur->hprev;
			}
			if (cur->hprev) {
				cur->hprev->hnext = cur->hnext;
			} else {
				*head = cur->hnext;
			}
			cur->blocknr = 0;
			cur->sb = NULL;
			cur->state = 0;
			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
				atomic_dec(&(cur->jlist->j_nonzerolen));
			cur->bh = NULL;
			cur->jlist = NULL;
		}
		cur = cur->hnext;
	}
}
static void free_journal_ram(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	kfree(journal->j_current_jl);
	journal->j_num_lists--;

	vfree(journal->j_cnode_free_orig);
	free_list_bitmaps(sb, journal->j_list_bitmap);
	free_bitmap_nodes(sb);	/* must be after free_list_bitmaps */
	if (journal->j_header_bh) {
		brelse(journal->j_header_bh);
	}
	/* j_header_bh is on the journal dev, make sure not to release the journal
	 * dev until we brelse j_header_bh
	 */
	release_journal_dev(sb, journal);
	vfree(journal);
}
/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, int error)
{
	struct reiserfs_transaction_handle myth;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	/* we only want to flush out transactions if we were called with error == 0
	 */
	if (!error && !(sb->s_flags & MS_RDONLY)) {
		/* end the current trans */
		BUG_ON(!th->t_trans_id);
		do_journal_end(th, sb, 10, FLUSH_ALL);

		/* make sure something gets logged to force our way into the flush code */
		if (!journal_join(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb,
					   SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
		}
	}

	/* this also catches errors during the do_journal_end above */
	if (!error && reiserfs_is_journal_aborted(journal)) {
		memset(&myth, 0, sizeof(myth));
		if (!journal_join_abort(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb,
					   SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
		}
	}

	reiserfs_mounted_fs_count--;
	/* wait for all commits to finish */
	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
	flush_workqueue(commit_wq);
	if (!reiserfs_mounted_fs_count) {
		destroy_workqueue(commit_wq);
		commit_wq = NULL;
	}

	free_journal_ram(sb);

	return 0;
}
/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th,
		    struct super_block *sb)
{
	return do_journal_release(th, sb, 0);
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
			  struct super_block *sb)
{
	return do_journal_release(th, sb, 1);
}
/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *sb,
				       struct reiserfs_journal_desc *desc,
				       struct reiserfs_journal_commit *commit)
{
	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0) {
		return 1;
	}
	return 0;
}
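
/*
 * Layout reminder (worked example, not from the original source): the
 * journal is a ring of SB_ONDISK_JOURNAL_SIZE(sb) blocks.  A transaction
 * of length L starting at ring offset O has its description block at O
 * and its commit block at (O + L + 1) % SB_ONDISK_JOURNAL_SIZE(sb).
 * E.g. with an 8192-block journal, O = 8190 and L = 3 put the commit
 * block at (8190 + 3 + 1) % 8192 = 2, wrapping past the end of the ring.
 */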
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *sb,
					struct buffer_head *d_bh,
					unsigned int *oldest_invalid_trans_id,
					unsigned long *newest_mount_id)
{
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;
	unsigned long offset;

	if (!d_bh)
		return 0;

	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
	if (get_desc_trans_len(desc) > 0
	    && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
		if (oldest_invalid_trans_id && *oldest_invalid_trans_id
		    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-986: transaction "
				       "is valid returning because trans_id %d is greater than "
				       "oldest_invalid %lu",
				       get_desc_trans_id(desc),
				       *oldest_invalid_trans_id);
			return 0;
		}
		if (newest_mount_id
		    && *newest_mount_id > get_desc_mount_id(desc)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-1087: transaction "
				       "is valid returning because mount_id %d is less than "
				       "newest_mount_id %lu",
				       get_desc_mount_id(desc),
				       *newest_mount_id);
			return -1;
		}
		if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
			reiserfs_warning(sb, "journal-2018",
					 "Bad transaction length %d "
					 "encountered, ignoring transaction",
					 get_desc_trans_len(desc));
			return -1;
		}
		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);

		/* ok, we have a journal description block, lets see if the transaction was valid */
		c_bh =
		    journal_bread(sb,
				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				  ((offset + get_desc_trans_len(desc) +
				    1) % SB_ONDISK_JOURNAL_SIZE(sb)));
		if (!c_bh)
			return 0;
		commit = (struct reiserfs_journal_commit *)c_bh->b_data;
		if (journal_compare_desc_commit(sb, desc, commit)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal_transaction_is_valid, commit offset %ld had bad "
				       "time %d or length %d",
				       c_bh->b_blocknr -
				       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
				       get_commit_trans_id(commit),
				       get_commit_trans_len(commit));
			brelse(c_bh);
			if (oldest_invalid_trans_id) {
				*oldest_invalid_trans_id =
				    get_desc_trans_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1004: "
					       "transaction_is_valid setting oldest invalid trans_id "
					       "to %d",
					       get_desc_trans_id(desc));
			}
			return -1;
		}
		brelse(c_bh);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1006: found valid "
			       "transaction start offset %llu, len %d id %d",
			       d_bh->b_blocknr -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_desc_trans_len(desc),
			       get_desc_trans_id(desc));
		return 1;
	} else {
		return 0;
	}
}
static void brelse_array(struct buffer_head **heads, int num)
{
	int i;
	for (i = 0; i < num; i++) {
		brelse(heads[i]);
	}
}
2120 ** given the start, and values for the oldest acceptable transactions,
2121 ** this either reads in a replays a transaction, or returns because the transaction
2122 ** is invalid, or too old.
2124 static int journal_read_transaction(struct super_block
*sb
,
2125 unsigned long cur_dblock
,
2126 unsigned long oldest_start
,
				    unsigned int oldest_trans_id,
				    unsigned long newest_mount_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	unsigned int trans_id = 0;
	struct buffer_head *c_bh;
	struct buffer_head *d_bh;
	struct buffer_head **log_blocks = NULL;
	struct buffer_head **real_blocks = NULL;
	unsigned int trans_offset;
	int i;
	int trans_half;

	d_bh = journal_bread(sb, cur_dblock);
	if (!d_bh)
		return 1;
	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
	trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: "
		       "journal_read_transaction, offset %llu, len %d mount_id %d",
		       d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
		       get_desc_trans_len(desc), get_desc_mount_id(desc));
	if (get_desc_trans_id(desc) < oldest_trans_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: "
			       "journal_read_trans skipping because %lu is too old",
			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));
		brelse(d_bh);
		return 1;
	}
	if (get_desc_mount_id(desc) != newest_mount_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
			       "journal_read_trans skipping because %d is != "
			       "newest_mount_id %lu", get_desc_mount_id(desc),
			       newest_mount_id);
		brelse(d_bh);
		return 1;
	}
	c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			     ((trans_offset + get_desc_trans_len(desc) + 1) %
			      SB_ONDISK_JOURNAL_SIZE(sb)));
	if (!c_bh) {
		brelse(d_bh);
		return 1;
	}
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	if (journal_compare_desc_commit(sb, desc, commit)) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal_read_transaction, "
			       "commit offset %llu had bad time %d or length %d",
			       c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_commit_trans_id(commit),
			       get_commit_trans_len(commit));
		brelse(c_bh);
		brelse(d_bh);
		return 1;
	}
	trans_id = get_desc_trans_id(desc);
	/* now we know we've got a good transaction, and it was
	 * inside the valid time ranges
	 */
	log_blocks = kmalloc(get_desc_trans_len(desc) *
			     sizeof(struct buffer_head *), GFP_NOFS);
	real_blocks = kmalloc(get_desc_trans_len(desc) *
			      sizeof(struct buffer_head *), GFP_NOFS);
	if (!log_blocks || !real_blocks) {
		brelse(c_bh);
		brelse(d_bh);
		kfree(log_blocks);
		kfree(real_blocks);
		reiserfs_warning(sb, "journal-1169",
				 "kmalloc failed, unable to mount FS");
		return -1;
	}
	/* get all the buffer heads */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		log_blocks[i] =
		    journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				   (trans_offset + 1 + i) %
				   SB_ONDISK_JOURNAL_SIZE(sb));
		if (i < trans_half) {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(desc->j_realblock[i]));
		} else {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(commit->
						  j_realblock[i - trans_half]));
		}
		if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
			reiserfs_warning(sb, "journal-1207",
					 "REPLAY FAILURE fsck required! "
					 "Block to replay is outside of "
					 "filesystem");
			goto abort_replay;
		}
		/* make sure we don't try to replay onto log or reserved area */
		if (is_block_in_log_or_reserved_area
		    (sb, real_blocks[i]->b_blocknr)) {
			reiserfs_warning(sb, "journal-1204",
					 "REPLAY FAILURE fsck required! "
					 "Trying to replay onto a log block");
		      abort_replay:
			brelse_array(log_blocks, i);
			brelse_array(real_blocks, i);
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
	}
	/* read in the log blocks, memcpy to the corresponding real block */
	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		wait_on_buffer(log_blocks[i]);
		if (!buffer_uptodate(log_blocks[i])) {
			reiserfs_warning(sb, "journal-1212",
					 "REPLAY FAILURE fsck required! "
					 "buffer read failed");
			brelse_array(log_blocks + i,
				     get_desc_trans_len(desc) - i);
			brelse_array(real_blocks, get_desc_trans_len(desc));
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
		memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
		       real_blocks[i]->b_size);
		set_buffer_uptodate(real_blocks[i]);
		brelse(log_blocks[i]);
	}
	/* flush out the real blocks */
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		set_buffer_dirty(real_blocks[i]);
		ll_rw_block(SWRITE, 1, real_blocks + i);
	}
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		wait_on_buffer(real_blocks[i]);
		if (!buffer_uptodate(real_blocks[i])) {
			reiserfs_warning(sb, "journal-1226",
					 "REPLAY FAILURE, fsck required! "
					 "buffer write failed");
			brelse_array(real_blocks + i,
				     get_desc_trans_len(desc) - i);
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
		brelse(real_blocks[i]);
	}
	cur_dblock =
	    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
	    ((trans_offset + get_desc_trans_len(desc) +
	      2) % SB_ONDISK_JOURNAL_SIZE(sb));
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "journal-1095: setting journal " "start to offset %ld",
		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));

	/* init starting values for the first transaction, in case this is
	 * the last transaction to be replayed.
	 */
	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	journal->j_last_flush_trans_id = trans_id;
	journal->j_trans_id = trans_id + 1;
	/* check for trans_id overflow */
	if (journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	brelse(c_bh);
	brelse(d_bh);
	kfree(log_blocks);
	kfree(real_blocks);
	return 0;
}
/* This function reads blocks starting from block and to max_block of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
static struct buffer_head *reiserfs_breada(struct block_device *dev,
					   b_blocknr_t block, int bufsize,
					   b_blocknr_t max_block)
{
	struct buffer_head *bhlist[BUFNR];
	unsigned int blocks = BUFNR;
	struct buffer_head *bh;
	int i, j;

	bh = __getblk(dev, block, bufsize);
	if (buffer_uptodate(bh))
		return bh;

	if (block + BUFNR > max_block) {
		blocks = max_block - block;
	}
	bhlist[0] = bh;
	j = 1;
	for (i = 1; i < blocks; i++) {
		bh = __getblk(dev, block + i, bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		} else
			bhlist[j++] = bh;
	}
	ll_rw_block(READ, j, bhlist);
	for (i = 1; i < j; i++)
		brelse(bhlist[i]);
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}
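
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a hypothetical caller walking the log with read-ahead.  reiserfs_breada
 * returns the requested block and opportunistically submits up to BUFNR-1
 * following blocks, so a sequential scan mostly hits cached buffers.
 */
#if 0
static void example_scan_log(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned long cur = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	unsigned long end = cur + SB_ONDISK_JOURNAL_SIZE(sb);
	struct buffer_head *bh;

	while (cur < end) {
		bh = reiserfs_breada(journal->j_dev_bd, cur,
				     sb->s_blocksize, end);
		if (!bh)
			break;
		/* ... inspect bh->b_data here ... */
		brelse(bh);
		cur++;
	}
}
#endif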
/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will be
** to an invalid transaction.  This tests that before finding all the
** transactions in the log, which makes normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and
** replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
*/
static int journal_read(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_desc *desc;
	unsigned int oldest_trans_id = 0;
	unsigned int oldest_invalid_trans_id = 0;
	time_t start;
	unsigned long oldest_start = 0;
	unsigned long cur_dblock = 0;
	unsigned long newest_mount_id = 9;
	struct buffer_head *d_bh;
	struct reiserfs_journal_header *jh;
	int valid_journal_header = 0;
	int replay_count = 0;
	int continue_replay = 1;
	int ret;
	char b[BDEVNAME_SIZE];

	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	reiserfs_info(sb, "checking transaction log (%s)\n",
		      bdevname(journal->j_dev_bd, b));
	start = get_seconds();

	/* step 1, read in the journal header block.  Check the transaction
	 * it says is the first unflushed, and if that transaction is not
	 * valid, replay is done
	 */
	journal->j_header_bh = journal_bread(sb,
					     SB_ONDISK_JOURNAL_1st_BLOCK(sb)
					     + SB_ONDISK_JOURNAL_SIZE(sb));
	if (!journal->j_header_bh) {
		return 1;
	}
	jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
	if (le32_to_cpu(jh->j_first_unflushed_offset) <
	    SB_ONDISK_JOURNAL_SIZE(sb)
	    && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
		oldest_start =
		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
		    le32_to_cpu(jh->j_first_unflushed_offset);
		oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		newest_mount_id = le32_to_cpu(jh->j_mount_id);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1153: found in "
			       "header: first_unflushed_offset %d, last_flushed_trans_id "
			       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
			       le32_to_cpu(jh->j_last_flush_trans_id));
		valid_journal_header = 1;

		/* now, we try to read the first unflushed offset.  If it
		 * is not valid, there is nothing more we can do, and it
		 * makes no sense to read through the whole log.
		 */
		d_bh = journal_bread(sb,
				     SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				     le32_to_cpu(jh->j_first_unflushed_offset));
		ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
		if (!ret) {
			continue_replay = 0;
		}
		brelse(d_bh);
		goto start_log_replay;
	}

	if (continue_replay && bdev_read_only(sb->s_bdev)) {
		reiserfs_warning(sb, "clm-2076",
				 "device is readonly, unable to replay log");
		return -1;
	}

	/* ok, there are transactions that need to be replayed.  start with
	 * the first log block, find all the valid transactions, and pick
	 * out the oldest.
	 */
	while (continue_replay
	       && cur_dblock <
	       (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
		SB_ONDISK_JOURNAL_SIZE(sb))) {
		/* Note that it is required for blocksize of primary fs
		 * device and journal device to be the same */
		d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock,
				       sb->s_blocksize,
				       SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				       SB_ONDISK_JOURNAL_SIZE(sb));
		ret = journal_transaction_is_valid(sb, d_bh,
						   &oldest_invalid_trans_id,
						   &newest_mount_id);
		if (ret == 1) {
			desc = (struct reiserfs_journal_desc *)d_bh->b_data;
			if (oldest_start == 0) {	/* init all oldest_ values */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1179: Setting "
					       "oldest_start to offset %llu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (sb), oldest_trans_id);
			} else if (oldest_trans_id > get_desc_trans_id(desc)) {
				/* one we just read was older */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1180: Resetting "
					       "oldest_start to offset %lu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (sb), oldest_trans_id);
			}
			if (newest_mount_id < get_desc_mount_id(desc)) {
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1299: Setting "
					       "newest_mount_id to %d",
					       get_desc_mount_id(desc));
			}
			cur_dblock += get_desc_trans_len(desc) + 2;
		} else {
			cur_dblock++;
		}
		brelse(d_bh);
	}

      start_log_replay:
	cur_dblock = oldest_start;
	if (oldest_trans_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1206: Starting replay "
			       "from offset %llu, trans_id %lu",
			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       oldest_trans_id);

	}
	replay_count = 0;
	while (continue_replay && oldest_trans_id > 0) {
		ret = journal_read_transaction(sb, cur_dblock, oldest_start,
					       oldest_trans_id, newest_mount_id);
		if (ret < 0) {
			return ret;
		} else if (ret != 0) {
			break;
		}
		cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
		replay_count++;
		if (cur_dblock == oldest_start)
			break;
	}

	if (oldest_trans_id == 0) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1225: No valid " "transactions found");
	}
	/* j_start does not get set correctly if we don't replay any
	 * transactions.  if we had a valid journal_header, set j_start to
	 * the first unflushed transaction value, copy the trans_id from
	 * the header
	 */
	if (valid_journal_header && replay_count == 0) {
		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
		journal->j_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		/* check for trans_id overflow */
		if (journal->j_trans_id == 0)
			journal->j_trans_id = 10;
		journal->j_last_flush_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id);
		journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
	} else {
		journal->j_mount_id = newest_mount_id + 1;
	}
	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
		       "newest_mount_id to %lu", journal->j_mount_id);
	journal->j_first_unflushed_offset = journal->j_start;
	if (replay_count > 0) {
		reiserfs_info(sb,
			      "replayed %d transactions in %lu seconds\n",
			      replay_count, get_seconds() - start);
	}
	if (!bdev_read_only(sb->s_bdev) &&
	    _update_journal_header_block(sb, journal->j_start,
					 journal->j_last_flush_trans_id)) {
		/* replay failed, caller must call free_journal_ram and
		 * abort the mount
		 */
		return -1;
	}
	return 0;
}
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
	struct reiserfs_journal_list *jl;
	jl = kzalloc(sizeof(struct reiserfs_journal_list),
		     GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&jl->j_list);
	INIT_LIST_HEAD(&jl->j_working_list);
	INIT_LIST_HEAD(&jl->j_tail_bh_list);
	INIT_LIST_HEAD(&jl->j_bh_list);
	mutex_init(&jl->j_commit_mutex);
	SB_JOURNAL(s)->j_num_lists++;
	get_journal_list(jl);
	return jl;
}
static void journal_list_init(struct super_block *sb)
{
	SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
}
static int release_journal_dev(struct super_block *super,
			       struct reiserfs_journal *journal)
{
	int result;

	result = 0;

	if (journal->j_dev_bd != NULL) {
		if (journal->j_dev_bd->bd_dev != super->s_dev)
			bd_release(journal->j_dev_bd);
		result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
		journal->j_dev_bd = NULL;
	}

	if (result != 0) {
		reiserfs_warning(super, "sh-457",
				 "Cannot release journal device: %i", result);
	}
	return result;
}
static int journal_init_dev(struct super_block *super,
			    struct reiserfs_journal *journal,
			    const char *jdev_name)
{
	int result;
	dev_t jdev;
	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
	char b[BDEVNAME_SIZE];

	result = 0;

	journal->j_dev_bd = NULL;
	jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
	    new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

	if (bdev_read_only(super->s_bdev))
		blkdev_mode = FMODE_READ;

	/* there is no "jdev" option and journal is on separate device */
	if ((!jdev_name || !jdev_name[0])) {
		journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
		journal->j_dev_mode = blkdev_mode;
		if (IS_ERR(journal->j_dev_bd)) {
			result = PTR_ERR(journal->j_dev_bd);
			journal->j_dev_bd = NULL;
			reiserfs_warning(super, "sh-458",
					 "cannot init journal device '%s': %i",
					 __bdevname(jdev, b), result);
			return result;
		} else if (jdev != super->s_dev) {
			result = bd_claim(journal->j_dev_bd, journal);
			if (result) {
				blkdev_put(journal->j_dev_bd, blkdev_mode);
				return result;
			}

			set_blocksize(journal->j_dev_bd, super->s_blocksize);
		}

		return 0;
	}

	journal->j_dev_mode = blkdev_mode;
	journal->j_dev_bd = open_bdev_exclusive(jdev_name,
						blkdev_mode, journal);
	if (IS_ERR(journal->j_dev_bd)) {
		result = PTR_ERR(journal->j_dev_bd);
		journal->j_dev_bd = NULL;
		reiserfs_warning(super,
				 "journal_init_dev: Cannot open '%s': %i",
				 jdev_name, result);
		return result;
	}

	set_blocksize(journal->j_dev_bd, super->s_blocksize);
	reiserfs_info(super,
		      "journal_init_dev: journal device: %s\n",
		      bdevname(journal->j_dev_bd, b));
	return 0;
}
/*
 * When creating/tuning a file system user can assign some
 * journal params within boundaries which depend on the ratio
 * blocksize/standard_blocksize.
 *
 * For blocks >= standard_blocksize transaction size should
 * be not less than JOURNAL_TRANS_MIN_DEFAULT, and not more
 * than JOURNAL_TRANS_MAX_DEFAULT.
 *
 * For blocks < standard_blocksize these boundaries should be
 * decreased proportionally.
 */
#define REISERFS_STANDARD_BLKSIZE (4096)

static int check_advise_trans_params(struct super_block *sb,
				     struct reiserfs_journal *journal)
{
	if (journal->j_trans_max) {
		/* Non-default journal params.
		   Do sanity check for them. */
		int ratio = 1;
		if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
			ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
		    journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
		    SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
		    JOURNAL_MIN_RATIO) {
			reiserfs_warning(sb, "sh-462",
					 "bad transaction max size (%u). "
					 "FSCK?", journal->j_trans_max);
			return 1;
		}
		if (journal->j_max_batch != (journal->j_trans_max) *
		    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT) {
			reiserfs_warning(sb, "sh-463",
					 "bad transaction max batch (%u). "
					 "FSCK?", journal->j_max_batch);
			return 1;
		}
	} else {
		/* Default journal params.
		   The file system was created by an old version
		   of mkreiserfs, so some fields contain zeros,
		   and we need to advise proper values for them */
		if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
			reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
					 sb->s_blocksize);
			return 1;
		}
		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
	}
	return 0;
}
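
/*
 * Editor's note (worked example, not part of the original source; exact
 * bounds depend on the constants in reiserfs_fs.h): for a 1024-byte
 * blocksize, ratio = 4096 / 1024 = 4, so the check above requires
 *
 *	JOURNAL_TRANS_MIN_DEFAULT / 4 <= j_trans_max
 *	j_trans_max <= JOURNAL_TRANS_MAX_DEFAULT / 4
 *
 * plus SB_ONDISK_JOURNAL_SIZE(sb) / j_trans_max >= JOURNAL_MIN_RATIO,
 * and j_max_batch must equal exactly
 * j_trans_max * JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT.
 */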
/*
** must be called once on fs mount.  calls journal_read for you
*/
int journal_init(struct super_block *sb, const char *j_dev_name,
		 int old_format, unsigned int commit_max_age)
{
	int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
	struct buffer_head *bhjh;
	struct reiserfs_super_block *rs;
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal;
	struct reiserfs_journal_list *jl;
	char b[BDEVNAME_SIZE];

	journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal));
	if (!journal) {
		reiserfs_warning(sb, "journal-1256",
				 "unable to get memory for journal structure");
		return 1;
	}
	memset(journal, 0, sizeof(struct reiserfs_journal));
	INIT_LIST_HEAD(&journal->j_bitmap_nodes);
	INIT_LIST_HEAD(&journal->j_prealloc_list);
	INIT_LIST_HEAD(&journal->j_working_list);
	INIT_LIST_HEAD(&journal->j_journal_list);
	journal->j_persistent_trans = 0;
	if (reiserfs_allocate_list_bitmaps(sb,
					   journal->j_list_bitmap,
					   reiserfs_bmap_count(sb)))
		goto free_and_return;
	allocate_bitmap_nodes(sb);

	/* reserved for journal area support */
	SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ?
					     REISERFS_OLD_DISK_OFFSET_IN_BYTES
					     / sb->s_blocksize +
					     reiserfs_bmap_count(sb) +
					     1 :
					     REISERFS_DISK_OFFSET_IN_BYTES /
					     sb->s_blocksize + 2);

	/* Sanity check to see if the standard journal fits within the first
	 * bitmap (actual for small blocksizes) */
	if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
	    (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
	     SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
		reiserfs_warning(sb, "journal-1393",
				 "journal does not fit for area addressed "
				 "by first of bitmap blocks. It starts at "
				 "%u and its size is %u. Block size %ld",
				 SB_JOURNAL_1st_RESERVED_BLOCK(sb),
				 SB_ONDISK_JOURNAL_SIZE(sb),
				 sb->s_blocksize);
		goto free_and_return;
	}

	if (journal_init_dev(sb, journal, j_dev_name) != 0) {
		reiserfs_warning(sb, "sh-462",
				 "unable to initialize journal device");
		goto free_and_return;
	}

	rs = SB_DISK_SUPER_BLOCK(sb);

	/* read journal header */
	bhjh = journal_bread(sb,
			     SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			     SB_ONDISK_JOURNAL_SIZE(sb));
	if (!bhjh) {
		reiserfs_warning(sb, "sh-459",
				 "unable to read journal header");
		goto free_and_return;
	}
	jh = (struct reiserfs_journal_header *)(bhjh->b_data);

	/* make sure that journal matches to the super block */
	if (is_reiserfs_jr(rs)
	    && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
		sb_jp_journal_magic(rs))) {
		reiserfs_warning(sb, "sh-460",
				 "journal header magic %x (device %s) does "
				 "not match to magic found in super block %x",
				 jh->jh_journal.jp_journal_magic,
				 bdevname(journal->j_dev_bd, b),
				 sb_jp_journal_magic(rs));
		brelse(bhjh);
		goto free_and_return;
	}

	journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
	journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
	journal->j_max_commit_age =
	    le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
	journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;

	if (check_advise_trans_params(sb, journal) != 0)
		goto free_and_return;
	journal->j_default_max_commit_age = journal->j_max_commit_age;

	if (commit_max_age != 0) {
		journal->j_max_commit_age = commit_max_age;
		journal->j_max_trans_age = commit_max_age;
	}

	reiserfs_info(sb, "journal params: device %s, size %u, "
		      "journal first block %u, max trans len %u, max batch %u, "
		      "max commit age %u, max trans age %u\n",
		      bdevname(journal->j_dev_bd, b),
		      SB_ONDISK_JOURNAL_SIZE(sb),
		      SB_ONDISK_JOURNAL_1st_BLOCK(sb),
		      journal->j_trans_max,
		      journal->j_max_batch,
		      journal->j_max_commit_age, journal->j_max_trans_age);

	brelse(bhjh);

	journal->j_list_bitmap_index = 0;
	journal_list_init(sb);

	memset(journal->j_list_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));

	INIT_LIST_HEAD(&journal->j_dirty_buffers);
	spin_lock_init(&journal->j_dirty_buffers_lock);

	journal->j_start = 0;
	journal->j_len = 0;
	journal->j_len_alloc = 0;
	atomic_set(&(journal->j_wcount), 0);
	atomic_set(&(journal->j_async_throttle), 0);
	journal->j_bcount = 0;
	journal->j_trans_start_time = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	init_waitqueue_head(&(journal->j_join_wait));
	mutex_init(&journal->j_mutex);
	mutex_init(&journal->j_flush_mutex);

	journal->j_trans_id = 10;
	journal->j_mount_id = 10;
	journal->j_state = 0;
	atomic_set(&(journal->j_jlock), 0);
	journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
	journal->j_cnode_free_orig = journal->j_cnode_free_list;
	journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
	journal->j_cnode_used = 0;
	journal->j_must_wait = 0;

	if (journal->j_cnode_free == 0) {
		reiserfs_warning(sb, "journal-2004", "Journal cnode memory "
				 "allocation failed (%ld bytes).  Journal is "
				 "too large for available memory.  Usually "
				 "this is due to a journal that is too large.",
				 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
		goto free_and_return;
	}

	init_journal_hash(sb);
	jl = journal->j_current_jl;
	jl->j_list_bitmap = get_list_bitmap(sb, jl);
	if (!jl->j_list_bitmap) {
		reiserfs_warning(sb, "journal-2005",
				 "get_list_bitmap failed for journal list 0");
		goto free_and_return;
	}
	if (journal_read(sb) < 0) {
		reiserfs_warning(sb, "reiserfs-2006",
				 "Replay Failure, unable to mount");
		goto free_and_return;
	}

	reiserfs_mounted_fs_count++;
	if (reiserfs_mounted_fs_count <= 1)
		commit_wq = create_workqueue("reiserfs");

	INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
	journal->j_work_sb = sb;
	return 0;
      free_and_return:
	free_journal_ram(sb);
	return 1;
}
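
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the expected mount-time usage.  journal_init allocates the in-core
 * journal, opens the journal device, validates the on-disk params and
 * runs log replay (journal_read) internally, so a caller only checks
 * the return value; on failure journal_init has already cleaned up via
 * free_and_return.
 */
#if 0
static int example_mount_journal(struct super_block *sb,
				 const char *jdev_name, int old_format,
				 unsigned int commit_max_age)
{
	if (journal_init(sb, jdev_name, old_format, commit_max_age))
		return -EINVAL;	/* log replay or setup failed */
	return 0;
}
#endif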
/*
** test for a polite end of the current transaction.  Used by file_write,
** and should be used by delete to make sure they don't write more than
** can fit inside a single transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
				   int new_alloc)
{
	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
	time_t now = get_seconds();
	/* cannot restart while nested */
	BUG_ON(!th->t_trans_id);
	if (th->t_refcount > 1)
		return 0;
	if (journal->j_must_wait > 0 ||
	    (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
	    atomic_read(&(journal->j_jlock)) ||
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
	    journal->j_cnode_free < (journal->j_trans_max * 3)) {
		return 1;
	}
	/* protected by the BKL here */
	journal->j_len_alloc += new_alloc;
	th->t_blocks_allocated += new_alloc;
	return 0;
}
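
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the typical polite-restart pattern for a long-running writer.  Ask
 * whether the running transaction should end before logging another
 * chunk, and if so, end it and begin a fresh one.  The block estimate
 * used here (JOURNAL_PER_BALANCE_CNT) is just a plausible example.
 */
#if 0
static int example_restart_if_needed(struct reiserfs_transaction_handle *th,
				     struct super_block *sb)
{
	int err = 0;

	if (journal_transaction_should_end(th, JOURNAL_PER_BALANCE_CNT)) {
		err = journal_end(th, sb, th->t_blocks_allocated);
		if (!err)
			err = journal_begin(th, sb, JOURNAL_PER_BALANCE_CNT);
	}
	return err;
}
#endif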
/* this must be called inside a transaction, and requires the
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
{
	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
	BUG_ON(!th->t_trans_id);
	journal->j_must_wait = 1;
	set_bit(J_WRITERS_BLOCKED, &journal->j_state);
	return;
}

/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_allow_writes(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
	wake_up(&journal->j_join_wait);
}

/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_wait_on_write_block(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	wait_event(journal->j_join_wait,
		   !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
}
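
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the block/allow pair forms a simple writer gate.  A hypothetical
 * freeze path might look like this; per the comments above, the caller
 * must already hold a running transaction for reiserfs_block_writes.
 */
#if 0
static void example_freeze_writers(struct reiserfs_transaction_handle *th,
				   struct super_block *s)
{
	reiserfs_block_writes(th);	/* new journal_begin callers now park
					 * in reiserfs_wait_on_write_block */
	/* ... work that must not race with new transactions ... */
	reiserfs_allow_writes(s);	/* wake everyone waiting on the gate */
}
#endif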
static void queue_log_writer(struct super_block *s)
{
	wait_queue_t wait;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	set_bit(J_WRITERS_QUEUED, &journal->j_state);

	/*
	 * we don't want to use wait_event here because
	 * we only want to wait once.
	 */
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&journal->j_join_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
		schedule();
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&journal->j_join_wait, &wait);
}

static void wake_queued_writers(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
		wake_up(&journal->j_join_wait);
}
static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned long bcount = journal->j_bcount;
	while (1) {
		schedule_timeout_uninterruptible(1);
		journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
		while ((atomic_read(&journal->j_wcount) > 0 ||
			atomic_read(&journal->j_jlock)) &&
		       journal->j_trans_id == trans_id) {
			queue_log_writer(sb);
		}
		if (journal->j_trans_id != trans_id)
			break;
		if (bcount == journal->j_bcount)
			break;
		bcount = journal->j_bcount;
	}
}
/* join == true if you must join an existing transaction.
** join == false if you can deal with waiting for others to finish
**
** this will block until the transaction is joinable.  send the number of
** blocks you expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, unsigned long nblocks,
			      int join)
{
	time_t now = get_seconds();
	unsigned int old_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_transaction_handle myth;
	int sched_count = 0;
	int retval;

	reiserfs_check_lock_depth(sb, "journal_begin");
	BUG_ON(nblocks > journal->j_trans_max);

	PROC_INFO_INC(sb, journal.journal_being);
	/* set here for journal_join */
	th->t_refcount = 1;
	th->t_super = sb;

      relock:
	lock_journal(sb);
	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
		unlock_journal(sb);
		retval = journal->j_errno;
		goto out_fail;
	}
	journal->j_bcount++;

	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
		unlock_journal(sb);
		reiserfs_wait_on_write_block(sb);
		PROC_INFO_INC(sb, journal.journal_relock_writers);
		goto relock;
	}
	now = get_seconds();

	/* if there is no room in the journal OR
	** if this transaction is too old, and we weren't called joinable,
	** wait for it to finish before beginning.  we don't sleep if there
	** aren't other writers
	*/
	if ((!join && journal->j_must_wait > 0) ||
	    (!join
	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
	    || (!join && atomic_read(&journal->j_wcount) > 0
		&& journal->j_trans_start_time > 0
		&& (now - journal->j_trans_start_time) >
		journal->j_max_trans_age) || (!join
					      && atomic_read(&journal->j_jlock))
	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

		old_trans_id = journal->j_trans_id;
		unlock_journal(sb);	/* allow others to finish this transaction */

		if (!join && (journal->j_len_alloc + nblocks + 2) >=
		    journal->j_max_batch &&
		    ((journal->j_len + nblocks + 2) * 100) <
		    (journal->j_len_alloc * 75)) {
			if (atomic_read(&journal->j_wcount) > 10) {
				sched_count++;
				queue_log_writer(sb);
				goto relock;
			}
		}
		/* don't mess with joining the transaction if all we have
		 * to do is wait for someone else to do a commit
		 */
		if (atomic_read(&journal->j_jlock)) {
			while (journal->j_trans_id == old_trans_id &&
			       atomic_read(&journal->j_jlock)) {
				queue_log_writer(sb);
			}
			goto relock;
		}
		retval = journal_join(&myth, sb, 1);
		if (retval)
			goto out_fail;

		/* someone might have ended the transaction while we joined */
		if (old_trans_id != journal->j_trans_id) {
			retval = do_journal_end(&myth, sb, 1, 0);
		} else {
			retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
		}

		if (retval)
			goto out_fail;

		PROC_INFO_INC(sb, journal.journal_relock_wcount);
		goto relock;
	}
	/* we are the first writer, set trans_id */
	if (journal->j_trans_start_time == 0) {
		journal->j_trans_start_time = get_seconds();
	}
	atomic_inc(&(journal->j_wcount));
	journal->j_len_alloc += nblocks;
	th->t_blocks_logged = 0;
	th->t_blocks_allocated = nblocks;
	th->t_trans_id = journal->j_trans_id;
	unlock_journal(sb);
	INIT_LIST_HEAD(&th->t_list);
	get_fs_excl();
	return 0;

      out_fail:
	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = sb;
	return retval;
}
struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
								    super_block
								    *s,
								    int nblocks)
{
	int ret;
	struct reiserfs_transaction_handle *th;

	/* if we're nesting into an existing transaction.  It will be
	 ** persistent on its own
	 */
	if (reiserfs_transaction_running(s)) {
		th = current->journal_info;
		th->t_refcount++;
		BUG_ON(th->t_refcount < 2);
		return th;
	}
	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
	if (!th)
		return NULL;
	ret = journal_begin(th, s, nblocks);
	if (ret) {
		kfree(th);
		return NULL;
	}

	SB_JOURNAL(s)->j_persistent_trans++;
	return th;
}

int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	int ret = 0;
	if (th->t_trans_id)
		ret = journal_end(th, th->t_super, th->t_blocks_allocated);
	else
		ret = -EIO;
	if (th->t_refcount == 0) {
		SB_JOURNAL(s)->j_persistent_trans--;
		kfree(th);
	}
	return ret;
}
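
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the persistent pair lets a caller carry a heap-allocated handle across
 * function boundaries instead of a stack-local one.
 */
#if 0
static int example_persistent(struct super_block *s)
{
	struct reiserfs_transaction_handle *th;

	th = reiserfs_persistent_transaction(s, 1);
	if (!th)
		return -ENOMEM;
	/* ... journal_mark_dirty calls, possibly from other functions ... */
	return reiserfs_end_persistent_transaction(th);
}
#endif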
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the
	 * current->journal_info pointer
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
}
int journal_join_abort(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the
	 * current->journal_info pointer
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
}
int journal_begin(struct reiserfs_transaction_handle *th,
		  struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;
	int ret;

	th->t_handle_save = NULL;
	if (cur_th) {
		/* we are nesting into the current transaction */
		if (cur_th->t_super == sb) {
			BUG_ON(!cur_th->t_refcount);
			cur_th->t_refcount++;
			memcpy(th, cur_th, sizeof(*th));
			if (th->t_refcount <= 1)
				reiserfs_warning(sb, "reiserfs-2005",
						 "BAD: refcount <= 1, but "
						 "journal_info != 0");
			return 0;
		} else {
			/* we've ended up with a handle from a different
			 ** filesystem.  save it and restore on journal_end.
			 ** This should never really happen...
			 */
			reiserfs_warning(sb, "clm-2100",
					 "nesting into a different FS");
			th->t_handle_save = current->journal_info;
			current->journal_info = th;
		}
	} else {
		current->journal_info = th;
	}
	ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
	BUG_ON(current->journal_info != th);

	/* I guess this boils down to being the reciprocal of clm-2100 above.
	 * If do_journal_begin_r fails, we need to put it back, since
	 * journal_end won't be called to do it. */
	if (ret)
		current->journal_info = th->t_handle_save;
	else
		BUG_ON(!th->t_refcount);

	return ret;
}
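
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the canonical transaction lifecycle around a single metadata update.
 * Error handling is abbreviated; 'bh' stands for some metadata buffer
 * the caller owns.
 */
#if 0
static int example_update_block(struct super_block *sb,
				struct buffer_head *bh)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, sb, 1);	/* reserve one log block */
	if (err)
		return err;
	reiserfs_prepare_for_journal(sb, bh, 1);	/* lock out writeback */
	/* ... modify bh->b_data here ... */
	journal_mark_dirty(&th, sb, bh);	/* log it in this trans */
	return journal_end(&th, sb, 1);
}
#endif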
/*
** puts bh into the current transaction.  If it was already there, it
** reorders it: removes the old pointers from the hash, and puts new ones
** in (to make sure replay happens in the right order).
**
** if it was dirty, cleans and files onto the clean list.  I can't let it
** be dirty again until the transaction is committed.
**
** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	int count_already_incd = 0;
	int prepared = 0;
	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(sb, journal.mark_dirty);
	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	sb->s_dirt = 1;

	prepared = test_clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
	/* already in this transaction, we are done */
	if (buffer_journaled(bh)) {
		PROC_INFO_INC(sb, journal.mark_dirty_already);
		return 0;
	}

	/* this must be turned into a panic instead of a warning.  We can't
	 ** allow a dirty or journal_dirty or locked buffer to be logged, as
	 ** some changes could get to disk too early.  NOT GOOD.
	 */
	if (!prepared || buffer_dirty(bh)) {
		reiserfs_warning(sb, "journal-1777",
				 "buffer %llu bad state "
				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
				 (unsigned long long)bh->b_blocknr,
				 prepared ? ' ' : '!',
				 buffer_locked(bh) ? ' ' : '!',
				 buffer_dirty(bh) ? ' ' : '!',
				 buffer_journal_dirty(bh) ? ' ' : '!');
	}

	if (atomic_read(&(journal->j_wcount)) <= 0) {
		reiserfs_warning(sb, "journal-1409",
				 "returning because j_wcount was %d",
				 atomic_read(&(journal->j_wcount)));
		return 1;
	}
	/* this error means I've screwed up, and we've overflowed the
	 ** transaction.  Nothing can be done here, except make the FS
	 ** readonly or panic.
	 */
	if (journal->j_len >= journal->j_trans_max) {
		reiserfs_panic(th->t_super, "journal-1413",
			       "j_len (%lu) is too big",
			       journal->j_len);
	}

	if (buffer_journal_dirty(bh)) {
		count_already_incd = 1;
		PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
		clear_buffer_journal_dirty(bh);
	}

	if (journal->j_len > journal->j_len_alloc) {
		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
	}

	set_buffer_journaled(bh);

	/* now put this guy on the end */
	if (!cn) {
		cn = get_cnode(sb);
		if (!cn) {
			reiserfs_panic(sb, "journal-4", "get_cnode failed!");
		}

		if (th->t_blocks_logged == th->t_blocks_allocated) {
			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
		}
		th->t_blocks_logged++;
		journal->j_len++;

		cn->bh = bh;
		cn->blocknr = bh->b_blocknr;
		cn->sb = sb;
		cn->jlist = NULL;
		insert_journal_hash(journal->j_hash_table, cn);
		if (!count_already_incd) {
			get_bh(bh);
		}
	}
	cn->prev = journal->j_last;
	cn->next = NULL;
	if (journal->j_last) {
		journal->j_last->next = cn;
		journal->j_last = cn;
	} else {
		journal->j_first = cn;
		journal->j_last = cn;
	}
	return 0;
}
int journal_end(struct reiserfs_transaction_handle *th,
		struct super_block *sb, unsigned long nblocks)
{
	if (!current->journal_info && th->t_refcount > 1)
		reiserfs_warning(sb, "REISER-NESTING",
				 "th NULL, refcount %d", th->t_refcount);

	if (!th->t_trans_id) {
		WARN_ON(1);
		return -EIO;
	}

	th->t_refcount--;
	if (th->t_refcount > 0) {
		struct reiserfs_transaction_handle *cur_th =
		    current->journal_info;

		/* we aren't allowed to close a nested transaction on a
		 ** different filesystem from the one in the task struct
		 */
		BUG_ON(cur_th->t_super != th->t_super);

		if (th != cur_th) {
			memcpy(current->journal_info, th, sizeof(*th));
			th->t_trans_id = 0;
		}
		return 0;
	} else {
		return do_journal_end(th, sb, nblocks, 0);
	}
}
/* removes from the current transaction, relsing and decrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and relsed the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *sb,
				   b_blocknr_t blocknr, int already_cleaned)
{
	struct buffer_head *bh;
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (!cn || !cn->bh) {
		return ret;
	}
	bh = cn->bh;
	if (cn->prev) {
		cn->prev->next = cn->next;
	}
	if (cn->next) {
		cn->next->prev = cn->prev;
	}
	if (cn == journal->j_first) {
		journal->j_first = cn->next;
	}
	if (cn == journal->j_last) {
		journal->j_last = cn->prev;
	}
	if (bh)
		remove_journal_hash(sb, journal->j_hash_table, NULL,
				    bh->b_blocknr, 0);
	clear_buffer_journaled(bh);	/* don't log this one */

	if (!already_cleaned) {
		clear_buffer_journal_dirty(bh);
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
		put_bh(bh);
		if (atomic_read(&(bh->b_count)) < 0) {
			reiserfs_warning(sb, "journal-1752",
					 "b_count < 0");
		}
		ret = 1;
	}
	journal->j_len--;
	journal->j_len_alloc--;
	free_cnode(sb, cn);
	return ret;
}
/*
** for any cnode in a journal list, it can only be dirtied if all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed
** to dirty, and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after
** flush_commit_list has gotten all the log blocks for a given
** transaction on disk
*/
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;
	struct reiserfs_journal_cnode *cur = cn->hprev;
	int can_dirty = 1;

	/* first test hprev.  These are all newer than cn, so any node here
	 ** with the same block number and dev means this node can't be sent
	 ** to disk right now.
	 */
	while (cur && can_dirty) {
		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
		    cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hprev;
	}
	/* then test hnext.  These are all older than cn.  As long as they
	 ** are committed to the log, it is safe to write cn to disk
	 */
	cur = cn->hnext;
	while (cur && can_dirty) {
		if (cur->jlist && cur->jlist->j_len > 0 &&
		    atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hnext;
	}
	return can_dirty;
}
/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
		     struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);
	/* you can sync while nested, very, very bad */
	BUG_ON(th->t_refcount > 1);
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
	}
	return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
}
/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(struct work_struct *work)
{
	struct reiserfs_journal *journal =
	    container_of(work, struct reiserfs_journal, j_work.work);
	struct super_block *sb = journal->j_work_sb;
	struct reiserfs_journal_list *jl;
	struct list_head *entry;

	lock_kernel();
	if (!list_empty(&journal->j_journal_list)) {
		/* last entry is the youngest, commit it and you get everything */
		entry = journal->j_journal_list.prev;
		jl = JOURNAL_LIST_ENTRY(entry);
		flush_commit_list(sb, jl, 1);
	}
	unlock_kernel();
}
/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
int reiserfs_flush_old_commits(struct super_block *sb)
{
	time_t now;
	struct reiserfs_transaction_handle th;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	now = get_seconds();
	/* safety check so we don't flush while we are replaying the log
	 * during mount
	 */
	if (list_empty(&journal->j_journal_list)) {
		return 0;
	}

	/* check the current transaction.  If there are no writers, and it is
	 * too old, finish it, and force the commit blocks to disk
	 */
	if (atomic_read(&journal->j_wcount) <= 0 &&
	    journal->j_trans_start_time > 0 &&
	    journal->j_len > 0 &&
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		if (!journal_join(&th, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb), 1);
			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));

			/* we're only being called from kreiserfsd, it makes
			 ** no sense to do an async commit so that kreiserfsd
			 ** can do it later
			 */
			do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
		}
	}
	return sb->s_dirt;
}
/*
** returns 0 if do_journal_end should return right away, returns 1 if
** do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will
** wait on j_join_wait until all the writers are done.  By the time it
** wakes up, the transaction it was called with has already ended, so it
** just flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set.  Also won't batch when
** others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still
** writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th,
			     struct super_block *sb, unsigned long nblocks,
			     int flags)
{

	time_t now;
	int flush = flags & FLUSH_ALL;
	int commit_now = flags & COMMIT_NOW;
	int wait_on_commit = flags & WAIT;
	struct reiserfs_journal_list *jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);

	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
		atomic_dec(&(journal->j_wcount));
	}

	/* BUG, deal with case where j_len is 0, but people previously freed
	 ** blocks need to be released.  will be dealt with by next
	 ** transaction that actually writes something, but should be taken
	 ** care of in this trans
	 */
	BUG_ON(journal->j_len == 0);

	/* if wcount > 0, and we are called with flush or commit_now,
	 ** we wait on j_join_wait.  We will wake up when the last writer has
	 ** finished the transaction, and started it on its way to the disk.
	 ** Then, we flush the commit or journal list, and just return 0
	 ** because the rest of journal end was already done for this
	 ** transaction.
	 */
	if (atomic_read(&(journal->j_wcount)) > 0) {
		if (flush || commit_now) {
			unsigned trans_id;

			jl = journal->j_current_jl;
			trans_id = jl->j_trans_id;
			if (wait_on_commit)
				jl->j_state |= LIST_COMMIT_PENDING;
			atomic_set(&(journal->j_jlock), 1);
			if (flush) {
				journal->j_next_full_flush = 1;
			}
			unlock_journal(sb);

			/* sleep while the current transaction is still j_jlocked */
			while (journal->j_trans_id == trans_id) {
				if (atomic_read(&journal->j_jlock)) {
					queue_log_writer(sb);
				} else {
					lock_journal(sb);
					if (journal->j_trans_id == trans_id) {
						atomic_set(&(journal->j_jlock),
							   1);
					}
					unlock_journal(sb);
				}
			}
			BUG_ON(journal->j_trans_id == trans_id);

			if (commit_now
			    && journal_list_still_alive(sb, trans_id)
			    && wait_on_commit) {
				flush_commit_list(sb, jl, 1);
			}
			return 0;
		}
		unlock_journal(sb);
		return 0;
	}

	/* deal with old transactions where we are the last writers */
	now = get_seconds();
	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		commit_now = 1;
		journal->j_next_async_flush = 1;
	}
	/* don't batch when someone is waiting on j_join_wait */
	/* don't batch when syncing the commit or flushing the whole trans */
	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
	    && journal->j_len_alloc < journal->j_max_batch
	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
		journal->j_bcount++;
		unlock_journal(sb);
		return 0;
	}

	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
		reiserfs_panic(sb, "journal-003",
			       "j_start (%ld) is too high",
			       journal->j_start);
	}
	return 1;
}
/*
** Does all the work that makes deleting blocks safe.
** when deleting a block marked BH_JNew, just remove it from the current
** transaction, clean its buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap.  That will prevent it from
** being allocated for unformatted nodes before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.
** That will prevent any old transactions with this block from trying to
** flush to the real location.  Since we aren't removing the cnode from the
** journal_list_hash, the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters and
** filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, b_blocknr_t blocknr)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	struct buffer_head *bh = NULL;
	struct reiserfs_list_bitmap *jb = NULL;
	int cleaned = 0;
	BUG_ON(!th->t_trans_id);

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (cn && cn->bh) {
		bh = cn->bh;
		get_bh(bh);
	}
	/* if it is journal new, we just remove it from this transaction */
	if (bh && buffer_journal_new(bh)) {
		clear_buffer_journal_new(bh);
		clear_prepared_bits(bh);
		reiserfs_clean_and_file_buffer(bh);
		cleaned = remove_from_transaction(sb, blocknr, cleaned);
	} else {
		/* set the bit for this block in the journal bitmap for
		 * this transaction */
		jb = journal->j_current_jl->j_list_bitmap;
		if (!jb) {
			reiserfs_panic(sb, "journal-1702",
				       "journal_list_bitmap is NULL");
		}
		set_bit_in_list_bitmap(sb, blocknr, jb);

		/* Note, the entire while loop is not allowed to schedule.  */

		if (bh) {
			clear_prepared_bits(bh);
			reiserfs_clean_and_file_buffer(bh);
		}
		cleaned = remove_from_transaction(sb, blocknr, cleaned);

		/* find all older transactions with this block, make sure
		 * they don't try to write it out */
		cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
					  blocknr);
		while (cn) {
			if (sb == cn->sb && blocknr == cn->blocknr) {
				set_bit(BLOCK_FREED, &cn->state);
				if (cn->bh) {
					if (!cleaned) {
						/* remove_from_transaction will brelse
						 ** the buffer if it was in the current
						 ** trans
						 */
						clear_buffer_journal_dirty(cn->bh);
						clear_buffer_dirty(cn->bh);
						clear_buffer_journal_test(cn->bh);
						cleaned = 1;
						put_bh(cn->bh);
						if (atomic_read
						    (&(cn->bh->b_count)) < 0) {
							reiserfs_warning(sb,
								 "journal-2138",
								 "cn->bh->b_count < 0");
						}
					}
					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
						atomic_dec(&(cn->jlist->
							     j_nonzerolen));
					}
					cn->bh = NULL;
				}
			}
			cn = cn->hnext;
		}
	}

	if (bh)
		release_buffer_page(bh);	/* get_hash grabs the buffer */
	return 0;
}
void reiserfs_update_inode_transaction(struct inode *inode)
{
	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
	REISERFS_I(inode)->i_jl = journal->j_current_jl;
	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
}
/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
			     struct reiserfs_journal_list *jl)
{
	struct reiserfs_transaction_handle th;
	struct super_block *sb = inode->i_sb;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	/* is it from the current transaction, or from an unknown transaction? */
	if (id == journal->j_trans_id) {
		jl = journal->j_current_jl;
		/* try to let other writers come in and grow this transaction */
		let_transaction_grow(sb, id);
		if (journal->j_trans_id != id) {
			goto flush_commit_only;
		}

		ret = journal_begin(&th, sb, 1);
		if (ret)
			return ret;

		/* someone might have ended this transaction while we joined */
		if (journal->j_trans_id != id) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
			ret = journal_end(&th, sb, 1);
			goto flush_commit_only;
		}

		ret = journal_end_sync(&th, sb, 1);
		if (!ret)
			ret = 1;

	} else {
		/* this gets tricky, we have to make sure the journal list in
		 * the inode still exists.  We know the list is still around
		 * if we've got a larger transaction id than the oldest list
		 */
	      flush_commit_only:
		if (journal_list_still_alive(inode->i_sb, id)) {
			/*
			 * we only set ret to 1 when we know for sure
			 * the barrier hasn't been started yet on the commit
			 * block.
			 */
			if (atomic_read(&jl->j_commit_left) > 1)
				ret = 1;
			flush_commit_list(sb, jl, 1);
			if (journal->j_errno)
				ret = journal->j_errno;
		}
	}
	/* otherwise the list is gone, and long since committed */
	return ret;
}
int reiserfs_commit_for_inode(struct inode *inode)
{
	unsigned int id = REISERFS_I(inode)->i_trans_id;
	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

	/* for the whole inode, assume unset id means it was
	 * changed in the current transaction.  More conservative
	 */
	if (!id || !jl) {
		reiserfs_update_inode_transaction(inode);
		id = REISERFS_I(inode)->i_trans_id;
		/* jl will be updated in __commit_trans_jl */
	}

	return __commit_trans_jl(inode, id, jl);
}
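
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * this is the building block behind fsync-style callers.  Assuming the
 * inode was stamped with reiserfs_update_inode_transaction when it was
 * dirtied, flushing its metadata reduces to one call:
 */
#if 0
static int example_sync_inode_metadata(struct inode *inode)
{
	/* commits (or waits on) whatever transaction last touched inode */
	return reiserfs_commit_for_inode(inode);
}
#endif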
void reiserfs_restore_prepared_buffer(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	PROC_INFO_INC(sb, journal.restore_prepared);
	if (!bh) {
		return;
	}
	if (test_clear_buffer_journal_restore_dirty(bh) &&
	    buffer_journal_dirty(bh)) {
		struct reiserfs_journal_cnode *cn;
		cn = get_journal_hash_dev(sb,
					  journal->j_list_hash_table,
					  bh->b_blocknr);
		if (cn && can_dirty(cn)) {
			set_buffer_journal_test(bh);
			mark_buffer_dirty(bh);
		}
	}
	clear_buffer_journal_prepared(bh);
}
extern struct tree_balance *cur_tb;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it.  So, we must:
** clean it
** wait on it.
**
*/
int reiserfs_prepare_for_journal(struct super_block *sb,
				 struct buffer_head *bh, int wait)
{
	PROC_INFO_INC(sb, journal.prepare);

	if (!trylock_buffer(bh)) {
		if (!wait)
			return 0;
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
		clear_buffer_journal_test(bh);
		set_buffer_journal_restore_dirty(bh);
	}
	unlock_buffer(bh);
	return 1;
}
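
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the counterpart path.  If a caller prepares a buffer and then decides
 * not to log it after all, it must hand the buffer back through
 * reiserfs_restore_prepared_buffer so any saved dirty bit is reinstated.
 */
#if 0
static void example_abandon_prepared(struct super_block *sb,
				     struct buffer_head *bh)
{
	reiserfs_prepare_for_journal(sb, bh, 1);
	/* ... decide the change is not needed after all ... */
	reiserfs_restore_prepared_buffer(sb, bh);
}
#endif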
static void flush_old_journal_lists(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *jl;
	struct list_head *entry;
	time_t now = get_seconds();

	while (!list_empty(&journal->j_journal_list)) {
		entry = journal->j_journal_list.next;
		jl = JOURNAL_LIST_ENTRY(entry);
		/* this check should always be run, to send old lists to disk */
		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
		    atomic_read(&jl->j_commit_left) == 0 &&
		    test_transaction(s, jl)) {
			flush_used_journal_lists(s, jl);
		} else {
			break;
		}
	}
}
/*
** long and ugly.  If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up.  Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th,
			  struct super_block *sb, unsigned long nblocks,
			  int flags)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
	struct reiserfs_journal_cnode *last_cn = NULL;
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;	/* commit bh */
	struct buffer_head *d_bh;	/* desc bh */
	int cur_write_start = 0;	/* start index of current log write */
	int old_start;
	int i;
	int flush;
	int wait_on_commit;
	struct reiserfs_journal_list *jl, *temp_jl;
	struct list_head *entry, *safe;
	unsigned long jindex;
	unsigned int commit_trans_id;
	int trans_half;

	BUG_ON(th->t_refcount > 1);
	BUG_ON(!th->t_trans_id);

	/* protect flush_older_commits from doing mistakes if the
	   transaction ID counter gets overflowed.  */
	if (th->t_trans_id == ~0U)
		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
	flush = flags & FLUSH_ALL;
	wait_on_commit = flags & WAIT;

	put_fs_excl();
	current->journal_info = th->t_handle_save;
	reiserfs_check_lock_depth(sb, "journal end");
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
	}

	lock_journal(sb);
	if (journal->j_next_full_flush) {
		flags |= FLUSH_ALL;
		flush = 1;
	}
	if (journal->j_next_async_flush) {
		flags |= COMMIT_NOW | WAIT;
		wait_on_commit = 1;
	}

	/* check_journal_end locks the journal, and unlocks if it does
	 ** not return 1.  it tells us if we should continue with the
	 ** journal_end, or just return
	 */
	if (!check_journal_end(th, sb, nblocks, flags)) {
		sb->s_dirt = 1;
		wake_queued_writers(sb);
		reiserfs_async_progress_wait(sb);
		goto out;
	}

	/* check_journal_end might set these, check again */
	if (journal->j_next_full_flush) {
		flush = 1;
	}

	/*
	 ** j must wait means we have to flush the log blocks, and the
	 ** real blocks for this transaction
	 */
	if (journal->j_must_wait > 0) {
		flush = 1;
	}
#ifdef REISERFS_PREALLOCATE
	/* quota ops might need to nest, setup the journal_info pointer for
	 * them and raise the refcount so that it is > 0. */
	current->journal_info = th;
	th->t_refcount++;
	reiserfs_discard_all_prealloc(th);	/* it should not involve new blocks into
						 * the transaction */
	th->t_refcount--;
	current->journal_info = th->t_handle_save;
#endif

	/* setup description block */
	d_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			      journal->j_start);
	set_buffer_uptodate(d_bh);
	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
	memset(d_bh->b_data, 0, d_bh->b_size);
	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
	set_desc_trans_id(desc, journal->j_trans_id);

	/* setup commit block.  Don't write (keep it clean too) this one
	 * until after everyone else is written */
	c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			      ((journal->j_start + journal->j_len +
				1) % SB_ONDISK_JOURNAL_SIZE(sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	memset(c_bh->b_data, 0, c_bh->b_size);
	set_commit_trans_id(commit, journal->j_trans_id);
	set_buffer_uptodate(c_bh);

	/* init this journal list */
	jl = journal->j_current_jl;

	/* we lock the commit before doing anything because
	 * we want to make sure nobody tries to run flush_commit_list until
	 * the new transaction is fully setup, and we've already flushed the
	 * ordered bh list
	 */
	mutex_lock(&jl->j_commit_mutex);

	/* save the transaction id in case we need to commit it later */
	commit_trans_id = jl->j_trans_id;

	atomic_set(&jl->j_older_commits_done, 0);
	jl->j_trans_id = journal->j_trans_id;
	jl->j_timestamp = journal->j_trans_start_time;
	jl->j_commit_bh = c_bh;
	jl->j_start = journal->j_start;
	jl->j_len = journal->j_len;
	atomic_set(&jl->j_nonzerolen, journal->j_len);
	atomic_set(&jl->j_commit_left, journal->j_len + 2);
	jl->j_realblock = NULL;

	/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
	 ** for each real block, add it to the journal list hash,
	 ** copy into real block index array in the commit or desc block
	 */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
		if (buffer_journaled(cn->bh)) {
			jl_cn = get_cnode(sb);
			if (!jl_cn) {
				reiserfs_panic(sb, "journal-1676",
					       "get_cnode returned NULL");
			}
			if (i == 0) {
				jl->j_realblock = jl_cn;
			}
			jl_cn->prev = last_cn;
			jl_cn->next = NULL;
			if (last_cn) {
				last_cn->next = jl_cn;
			}
			last_cn = jl_cn;
			/* make sure the block we are trying to log is not a block
			   of journal or reserved area */

			if (is_block_in_log_or_reserved_area
			    (sb, cn->bh->b_blocknr)) {
				reiserfs_panic(sb, "journal-2332",
					       "Trying to log block %lu, "
					       "which is a log block",
					       cn->bh->b_blocknr);
			}
			jl_cn->blocknr = cn->bh->b_blocknr;
			jl_cn->state = 0;
			jl_cn->sb = sb;
			jl_cn->bh = cn->bh;
			jl_cn->jlist = jl;
			insert_journal_hash(journal->j_list_hash_table, jl_cn);
			if (i < trans_half) {
				desc->j_realblock[i] =
				    cpu_to_le32(cn->bh->b_blocknr);
			} else {
				commit->j_realblock[i - trans_half] =
				    cpu_to_le32(cn->bh->b_blocknr);
			}
		} else {
			i--;
		}
	}
	set_desc_trans_len(desc, journal->j_len);
	set_desc_mount_id(desc, journal->j_mount_id);
	set_desc_trans_id(desc, journal->j_trans_id);
	set_commit_trans_len(commit, journal->j_len);

	/* special check in case all buffers in the journal
	 ** were marked for not logging */
	BUG_ON(journal->j_len == 0);

	/* we're about to dirty all the log blocks, mark the description block
	 * dirty now too.  Don't mark the commit block dirty until all the
	 * others are on disk
	 */
	mark_buffer_dirty(d_bh);

	/* first data block is j_start + 1, so add one to cur_write_start
	 * wherever you use it */
	cur_write_start = journal->j_start;
	cn = journal->j_first;
	jindex = 1;	/* start at one so we don't get the desc again */
	while (cn) {
		clear_buffer_journal_new(cn->bh);
		/* copy all the real blocks into log area.  dirty log blocks */
		if (buffer_journaled(cn->bh)) {
			struct buffer_head *tmp_bh;
			char *addr;
			struct page *page;
			tmp_bh = journal_getblk(sb,
						SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
						((cur_write_start + jindex) %
						 SB_ONDISK_JOURNAL_SIZE(sb)));
			set_buffer_uptodate(tmp_bh);
			page = cn->bh->b_page;
			addr = kmap(page);
			memcpy(tmp_bh->b_data,
			       addr + offset_in_page(cn->bh->b_data),
			       cn->bh->b_size);
			kunmap(page);
			mark_buffer_dirty(tmp_bh);
			jindex++;
			set_buffer_journal_dirty(cn->bh);
			clear_buffer_journaled(cn->bh);
		} else {
			/* JDirty cleared sometime during transaction.
			 * don't log this one */
			reiserfs_warning(sb, "journal-2048",
					 "BAD, buffer in journal hash, "
					 "but not JDirty!");
			brelse(cn->bh);
		}
		next = cn->next;
		free_cnode(sb, cn);
		cn = next;
		cond_resched();
	}

	/* we are done with both the c_bh and d_bh, but
	 ** c_bh must be written after all other commit blocks,
	 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
	 */

	journal->j_current_jl = alloc_journal_list(sb);

	/* now it is safe to insert this transaction on the main list */
	list_add_tail(&jl->j_list, &journal->j_journal_list);
	list_add_tail(&jl->j_working_list, &journal->j_working_list);
	journal->j_num_work_lists++;

	/* reset journal values for the next transaction */
	old_start = journal->j_start;
	journal->j_start =
	    (journal->j_start + journal->j_len +
	     2) % SB_ONDISK_JOURNAL_SIZE(sb);
	atomic_set(&(journal->j_wcount), 0);
	journal->j_bcount = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	journal->j_len = 0;
	journal->j_trans_start_time = 0;
	/* check for trans_id overflow */
	if (++journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	journal->j_current_jl->j_trans_id = journal->j_trans_id;
	journal->j_must_wait = 0;
	journal->j_len_alloc = 0;
	journal->j_next_full_flush = 0;
	journal->j_next_async_flush = 0;
	init_journal_hash(sb);

	// make sure reiserfs_add_jh sees the new current_jl before we
	// write out the tails
	smp_mb();

	/* tail conversion targets have to hit the disk before we end the
	 * transaction. Otherwise a later transaction might repack the tail
	 * before this transaction commits, leaving the data block unflushed
	 * and clean.  if we crash before the later transaction commits, the
	 * data block is lost.
	 */
	if (!list_empty(&jl->j_tail_bh_list)) {
		unlock_kernel();
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_tail_bh_list);
		lock_kernel();
	}
	BUG_ON(!list_empty(&jl->j_tail_bh_list));
	mutex_unlock(&jl->j_commit_mutex);

	/* honor the flush wishes from the caller, simple commits can
	 ** be done outside the journal lock, they are done below
	 **
	 ** if we don't flush the commit list right now, we put it into
	 ** the work queue so the people waiting on the async progress work
	 ** queue don't wait for this proc to flush journal lists and such.
	 */
	if (flush) {
		flush_commit_list(sb, jl, 1);
		flush_journal_list(sb, jl, 1);
	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);

	/* if the next transaction has any chance of wrapping, flush
	 ** transactions that might get overwritten.  If any journal lists
	 ** are very old flush them as well.
	 */
      first_jl:
	list_for_each_safe(entry, safe, &journal->j_journal_list) {
		temp_jl = JOURNAL_LIST_ENTRY(entry);
		if (journal->j_start <= temp_jl->j_start) {
			if ((journal->j_start + journal->j_trans_max + 1) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(sb, temp_jl);
				goto first_jl;
			} else if ((journal->j_start +
				    journal->j_trans_max + 1) <
				   SB_ONDISK_JOURNAL_SIZE(sb)) {
				/* if we don't cross into the next
				 * transaction and we don't wrap, there is
				 * no way we can overlap any later
				 * transactions, break now
				 */
				break;
			}
		} else if ((journal->j_start +
			    journal->j_trans_max + 1) >
			   SB_ONDISK_JOURNAL_SIZE(sb)) {
			if (((journal->j_start + journal->j_trans_max + 1) %
			     SB_ONDISK_JOURNAL_SIZE(sb)) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(sb, temp_jl);
				goto first_jl;
			} else {
				/* we don't overlap anything from our start
				 * to the end of the log, and our wrapped
				 * portion doesn't overlap anything at the
				 * start of the log.  We can break
				 */
				break;
			}
		}
	}
	flush_old_journal_lists(sb);

	journal->j_current_jl->j_list_bitmap =
	    get_list_bitmap(sb, journal->j_current_jl);

	if (!(journal->j_current_jl->j_list_bitmap)) {
		reiserfs_panic(sb, "journal-1996",
			       "could not get a list bitmap");
	}

	atomic_set(&(journal->j_jlock), 0);
	unlock_journal(sb);
	/* wake up any body waiting to join. */
	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
	wake_up(&(journal->j_join_wait));

	if (!flush && wait_on_commit &&
	    journal_list_still_alive(sb, commit_trans_id)) {
		flush_commit_list(sb, jl, 1);
	}
      out:
	reiserfs_check_lock_depth(sb, "journal end2");

	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = sb;

	return journal->j_errno;
}
/* Set the file system read only and refuse new transactions */
void reiserfs_abort_journal(struct super_block *sb, int errno)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	sb->s_flags |= MS_RDONLY;
	set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK