/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/jbd2.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}
/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (!trylock_page(page))
                goto nope;

        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}
/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction,
                                        struct buffer_head **cbh,
                                        __u32 crc32_sum)
{
        struct journal_head *descriptor;
        struct commit_header *tmp;
        struct buffer_head *bh;
        int ret;
        int barrier_done = 0;
        struct timespec now = current_kernel_time();

        if (is_journal_aborted(journal))
                return 0;

        descriptor = jbd2_journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;

        bh = jh2bh(descriptor);

        tmp = (struct commit_header *)bh->b_data;
        tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
        tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
        tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
        tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
        tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

        if (JBD2_HAS_COMPAT_FEATURE(journal,
                                    JBD2_FEATURE_COMPAT_CHECKSUM)) {
                tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
                tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
                tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
        }

        JBUFFER_TRACE(descriptor, "submit commit block");
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        bh->b_end_io = journal_end_buffer_io_sync;

        if (journal->j_flags & JBD2_BARRIER &&
            !JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                set_buffer_ordered(bh);
                barrier_done = 1;
        }
        ret = submit_bh(WRITE_SYNC_PLUG, bh);
        if (barrier_done)
                clear_buffer_ordered(bh);

        /* is it possible for another commit to fail at roughly
         * the same time as this one?  If so, we don't want to
         * trust the barrier flag in the super, but instead want
         * to remember if we sent a barrier request
         */
        if (ret == -EOPNOTSUPP && barrier_done) {
                printk(KERN_WARNING
                       "JBD: barrier-based sync failed on %s - "
                       "disabling barriers\n", journal->j_devname);
                spin_lock(&journal->j_state_lock);
                journal->j_flags &= ~JBD2_BARRIER;
                spin_unlock(&journal->j_state_lock);

                /* And try again, without the barrier */
                lock_buffer(bh);
                set_buffer_uptodate(bh);
                clear_buffer_dirty(bh);
                ret = submit_bh(WRITE_SYNC_PLUG, bh);
        }
        *cbh = bh;
        return ret;
}
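
/*
 * Usage sketch (mirrors the two call sites in
 * jbd2_journal_commit_transaction() below): with the ASYNC_COMMIT
 * incompat feature the commit record is submitted early and followed by
 * a cache flush; otherwise it is submitted only after all descriptor and
 * metadata writes have completed.  Roughly:
 *
 *	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
 *			JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
 *		err = journal_submit_commit_record(journal,
 *				commit_transaction, &cbh, crc32_sum);
 *	...
 *	err = journal_wait_on_commit_record(journal, cbh);
 */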
/*
 * This function, along with journal_submit_commit_record(), allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
                                         struct buffer_head *bh)
{
        int ret = 0;

retry:
        clear_buffer_dirty(bh);
        wait_on_buffer(bh);
        if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
                printk(KERN_WARNING
                       "JBD2: wait_on_commit_record: sync failed on %s - "
                       "disabling barriers\n", journal->j_devname);
                spin_lock(&journal->j_state_lock);
                journal->j_flags &= ~JBD2_BARRIER;
                spin_unlock(&journal->j_state_lock);

                lock_buffer(bh);
                clear_buffer_dirty(bh);
                set_buffer_uptodate(bh);
                bh->b_end_io = journal_end_buffer_io_sync;

                ret = submit_bh(WRITE_SYNC_PLUG, bh);
                if (ret) {
                        unlock_buffer(bh);
                        return ret;
                }
                goto retry;
        }

        if (unlikely(!buffer_uptodate(bh)))
                ret = -EIO;
        put_bh(bh);            /* One for getblk() */
        jbd2_journal_put_journal_head(bh2jh(bh));

        return ret;
}
/*
 * write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode =  WB_SYNC_ALL,
                .nr_to_write = mapping->nrpages * 2,
                .range_start = 0,
                .range_end = i_size_read(mapping->host),
        };

        ret = generic_writepages(mapping, &wbc);
        return ret;
}
/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode;
        int err, ret = 0;
        struct address_space *mapping;

        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                mapping = jinode->i_vfs_inode->i_mapping;
                jinode->i_flags |= JI_COMMIT_RUNNING;
                spin_unlock(&journal->j_list_lock);
                /*
                 * submit the inode data buffers. We use writepage
                 * instead of writepages, because writepages can do
                 * block allocation with delalloc. We need to write
                 * only allocated blocks here.
                 */
                trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
                err = journal_submit_inode_data_buffers(mapping);
                if (!ret)
                        ret = err;
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
                jinode->i_flags &= ~JI_COMMIT_RUNNING;
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
        spin_unlock(&journal->j_list_lock);
        return ret;
}
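
/*
 * Locking sketch: the JI_COMMIT_RUNNING flag set above is what other code
 * waits on before a jbd2_inode may go away while we write its pages.  A
 * waiter (e.g. jbd2_journal_release_jbd_inode() in journal.c) is assumed
 * to block on the same bit, roughly:
 *
 *	DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING);
 *	wait_queue_head_t *wq = bit_waitqueue(&jinode->i_flags,
 *					      __JI_COMMIT_RUNNING);
 *	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 *	... schedule() until the bit clears, then finish_wait(wq, &wait.wait);
 *
 * (Sketch only; see journal.c for the authoritative version.)
 */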
/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode, *next_i;
        int err, ret = 0;

        /* For locking, see the comment in journal_submit_data_buffers() */
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                jinode->i_flags |= JI_COMMIT_RUNNING;
                spin_unlock(&journal->j_list_lock);
                err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
                if (err) {
                        /*
                         * Because AS_EIO is cleared by
                         * wait_on_page_writeback_range(), set it again so
                         * that user process can get -EIO from fsync().
                         */
                        set_bit(AS_EIO,
                                &jinode->i_vfs_inode->i_mapping->flags);

                        if (!ret)
                                ret = err;
                }
                spin_lock(&journal->j_list_lock);
                jinode->i_flags &= ~JI_COMMIT_RUNNING;
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }

        /* Now refile inode to proper lists */
        list_for_each_entry_safe(jinode, next_i,
                                 &commit_transaction->t_inode_list, i_list) {
                list_del(&jinode->i_list);
                if (jinode->i_next_transaction) {
                        jinode->i_transaction = jinode->i_next_transaction;
                        jinode->i_next_transaction = NULL;
                        list_add(&jinode->i_list,
                                &jinode->i_transaction->t_inode_list);
                } else {
                        jinode->i_transaction = NULL;
                }
        }
        spin_unlock(&journal->j_list_lock);

        return ret;
}
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        char *addr;
        __u32 checksum;

        addr = kmap_atomic(page, KM_USER0);
        checksum = crc32_be(crc32_sum,
                (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
        kunmap_atomic(addr, KM_USER0);

        return checksum;
}
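
/*
 * Usage sketch (this is how the commit loop below chains the checksum
 * across every block written to the log before stashing it in the
 * commit record):
 *
 *	__u32 crc32_sum = ~0;
 *	for (i = 0; i < bufs; i++)
 *		crc32_sum = jbd2_checksum_data(crc32_sum, wbuf[i]);
 *	... later: journal_submit_commit_record(journal, commit_transaction,
 *						&cbh, crc32_sum);
 */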
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
                                   unsigned long long block)
{
        tag->t_blocknr = cpu_to_be32(block & (u32)~0);
        if (tag_bytes > JBD2_TAG_SIZE32)
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
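
/*
 * Worked example (hypothetical block number): with 64-bit tags and
 * block = 0x123456789, t_blocknr stores the low 32 bits (0x23456789)
 * and t_blocknr_high stores the remainder (0x1).  The shift is split
 * as "(block >> 31) >> 1" rather than ">> 32", presumably so the
 * expression stays well-defined even if the block type is ever only
 * 32 bits wide (shifting a 32-bit value by 32 is undefined in C).
 */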
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
        struct transaction_stats_s stats;
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
        int err;
        unsigned long long blocknr;
        ktime_t start_time;
        u64 commit_time;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i, to_free = 0;
        int tag_bytes = journal_tag_bytes(journal);
        struct buffer_head *cbh = NULL; /* For transactional checksums */
        __u32 crc32_sum = ~0;
        int write_op = WRITE;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

#ifdef COMMIT_STATS
        spin_lock(&journal->j_list_lock);
        summarise_journal_usage(journal);
        spin_unlock(&journal->j_list_lock);
#endif

        /* Do we need to erase the effects of a prior jbd2_journal_flush? */
        if (journal->j_flags & JBD2_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                jbd2_journal_update_superblock(journal, 1);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        trace_jbd2_start_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        /*
         * Use plugged writes here, since we want to submit several before
         * we unplug the device. We don't do explicit unplugging in here,
         * instead we rely on sync_buffer() doing the unplug for us.
         */
        if (commit_transaction->t_synchronous_commit)
                write_op = WRITE_SYNC_PLUG;
        trace_jbd2_commit_locking(journal, commit_transaction);
        stats.run.rs_wait = commit_transaction->t_max_wait;
        stats.run.rs_locked = jiffies;
        stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
                                              stats.run.rs_locked);

        spin_lock(&commit_transaction->t_handle_lock);
        while (commit_transaction->t_updates) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                        TASK_UNINTERRUPTIBLE);
                if (commit_transaction->t_updates) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        spin_unlock(&journal->j_state_lock);
                        schedule();
                        spin_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);

        J_ASSERT (commit_transaction->t_outstanding_credits <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a jbd2_journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple jbd2_journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
                jbd2_journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
         * frees some memory
         */
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);

        jbd_debug(3, "JBD: commit phase 1\n");

        /*
         * Switch to a new revoke table.
         */
        jbd2_journal_switch_revoke_table(journal);

        trace_jbd2_commit_flushing(journal, commit_transaction);
        stats.run.rs_flushing = jiffies;
        stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
                                             stats.run.rs_flushing);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        start_time = ktime_get();
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        spin_unlock(&journal->j_state_lock);

        jbd_debug(3, "JBD: commit phase 2\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        err = journal_submit_data_buffers(journal, commit_transaction);
        if (err)
                jbd2_journal_abort(journal, err);

        jbd2_journal_write_revoke_records(journal, commit_transaction,
                                          write_op);

        jbd_debug(3, "JBD: commit phase 2\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        spin_unlock(&journal->j_state_lock);

        trace_jbd2_commit_logging(journal, commit_transaction);
        stats.run.rs_logging = jiffies;
        stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
                                               stats.run.rs_logging);
        stats.run.rs_blocks = commit_transaction->t_outstanding_credits;
        stats.run.rs_blocks_logged = 0;

        J_ASSERT(commit_transaction->t_nr_buffers <=
                 commit_transaction->t_outstanding_credits);

        err = 0;
        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it. */

                if (is_journal_aborted(journal)) {
                        clear_buffer_jbddirty(jh2bh(jh));
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        jbd2_buffer_abort_trigger(jh,
                                                  jh->b_frozen_data ?
                                                  jh->b_frozen_triggers :
                                                  jh->b_triggers);
                        jbd2_journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT (bufs == 0);

                        jbd_debug(4, "JBD: get descriptor\n");

                        descriptor = jbd2_journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                jbd2_journal_abort(journal, -EIO);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
                        header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(bh);
                        set_buffer_dirty(bh);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        jbd2_journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }

                /* Where is the buffer to be written? */

                err = jbd2_journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        jbd2_journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by jbd2_journal_next_log_block() also.
                 */
                commit_transaction->t_outstanding_credits--;

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);

                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
                 * akpm: jbd2_journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = jbd2_journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
                wbuf[bufs++] = jh2bh(new_jh);

                /* Record the new block's tag in the current descriptor
                   buffer */

                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JBD2_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JBD2_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be32(tag_flag);
                tagp += tag_bytes;
                space_left -= tag_bytes;

                if (first_tag) {
                        memcpy (tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }

                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < tag_bytes + 16) {

                        jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                /*
                                 * Compute checksum.
                                 */
                                if (JBD2_HAS_COMPAT_FEATURE(journal,
                                        JBD2_FEATURE_COMPAT_CHECKSUM)) {
                                        crc32_sum =
                                            jbd2_checksum_data(crc32_sum, bh);
                                }

                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(write_op, bh);
                        }
                        cond_resched();
                        stats.run.rs_blocks_logged += bufs;

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }

        /* Done it all: now write the commit record asynchronously. */

        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                 &cbh, crc32_sum);
                if (err)
                        __jbd2_journal_abort_hard(journal);
                if (journal->j_flags & JBD2_BARRIER)
                        blkdev_issue_flush(journal->j_dev, NULL);
        }

        /*
         * This is the right place to wait for data buffers both for ASYNC
         * and !ASYNC commit. If commit is ASYNC, we need to wait only after
         * the commit block went to disk (which happens above). If commit is
         * SYNC, we need to wait for data buffers before we start writing
         * commit block, which happens below in such setting.
         */
        err = journal_finish_inode_data_buffers(journal, commit_transaction);
        if (err) {
                printk(KERN_WARNING
                        "JBD2: Detected IO errors while flushing file data "
                       "on %s\n", journal->j_devname);
                if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
                        jbd2_journal_abort(journal, err);
                err = 0;
        }

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
         */

        jbd_debug(3, "JBD: commit phase 3\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }
                if (cond_resched())
                        goto wait_for_iobuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                jbd2_journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by jbd2_journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                jbd2_journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_bit(BH_JWrite, &bh->b_state);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /* Wake up any transactions which were waiting for this
                   IO to complete */
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT (commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD: commit phase 4\n");

        /* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }
                if (cond_resched())
                        goto wait_for_ctlbuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                jbd2_journal_unfile_buffer(journal, jh);
                jbd2_journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        if (err)
                jbd2_journal_abort(journal, err);

        jbd_debug(3, "JBD: commit phase 5\n");

        if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                &cbh, crc32_sum);
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }
        if (!err && !is_journal_aborted(journal))
                err = journal_wait_on_commit_record(journal, cbh);

        if (err)
                jbd2_journal_abort(journal, err);

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

        jbd_debug(3, "JBD: commit phase 6\n");

        J_ASSERT(list_empty(&commit_transaction->t_inode_list));
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
                        jh->b_transaction == journal->j_running_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 *
                 * We also know that the frozen data has already fired
                 * its triggers if they exist, so we can clear that too.
                 */
                if (jh->b_committed_data) {
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                                jh->b_frozen_triggers = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd2_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                        jh->b_frozen_triggers = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        cp_transaction->t_chp_stats.cs_dropped++;
                        __jbd2_journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by jbd2_journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /* A buffer which has been freed while still being
                 * journaled by a previous transaction may end up still
                 * being dirty here, but we want to avoid writing back
                 * that buffer in the future now that the last use has
                 * been committed.  That's not only a performance gain,
                 * it also stops aliasing problems if the buffer is left
                 * behind for writeback and gets reallocated for another
                 * use in a different page. */
                if (buffer_freed(bh)) {
                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __jbd2_journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
                        JBUFFER_TRACE(jh, "refile for checkpoint writeback");
                        __jbd2_journal_refile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /* The buffer on BJ_Forget list and not jbddirty means
                         * it has been freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on BJ_Forget
                         * list. */
                        JBUFFER_TRACE(jh, "refile or unfile freed buffer");
                        __jbd2_journal_refile_buffer(jh);
                        if (!jh->b_transaction) {
                                jbd_unlock_bh_state(bh);
                                /* needs a brelse */
                                jbd2_journal_remove_journal_head(bh);
                                release_buffer_page(bh);
                        } else
                                jbd_unlock_bh_state(bh);
                }
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect transition
         * of a transaction into T_FINISHED state and calling
         * __jbd2_journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                spin_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD: commit phase 7\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT);

        commit_transaction->t_start = jiffies;
        stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
                                              commit_transaction->t_start);

        /*
         * File the transaction statistics
         */
        stats.ts_tid = commit_transaction->t_tid;
        stats.run.rs_handle_count = commit_transaction->t_handle_count;
        trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
                             commit_transaction->t_tid, &stats.run);

        /*
         * Calculate overall stats
         */
        spin_lock(&journal->j_history_lock);
        journal->j_stats.ts_tid++;
        journal->j_stats.run.rs_wait += stats.run.rs_wait;
        journal->j_stats.run.rs_running += stats.run.rs_running;
        journal->j_stats.run.rs_locked += stats.run.rs_locked;
        journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
        journal->j_stats.run.rs_logging += stats.run.rs_logging;
        journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
        journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
        journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
        spin_unlock(&journal->j_history_lock);

        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

        /*
         * Weight the historical average higher than the latest commit
         * time so we don't react too strongly to vast changes in the
         * commit time.
         */
        if (likely(journal->j_average_commit_time))
                journal->j_average_commit_time = (commit_time +
                                journal->j_average_commit_time*3) / 4;
        else
                journal->j_average_commit_time = commit_time;
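
        /*
         * Worked example with made-up numbers: if the running average is
         * 8 ms and this commit took 4 ms, the new average is
         * (4 + 3*8) / 4 = 7 ms, i.e. an exponential moving average that
         * gives the history 3/4 of the weight.
         */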
        spin_unlock(&journal->j_state_lock);

        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __jbd2_journal_drop_transaction(journal, commit_transaction);
                to_free = 1;
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
                        commit_transaction->t_cpnext = commit_transaction;
                        commit_transaction->t_cpprev = commit_transaction;
                } else {
                        commit_transaction->t_cpnext =
                                journal->j_checkpoint_transactions;
                        commit_transaction->t_cpprev =
                                commit_transaction->t_cpnext->t_cpprev;
                        commit_transaction->t_cpnext->t_cpprev =
                                commit_transaction;
                        commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
                }
        }
        spin_unlock(&journal->j_list_lock);

        if (journal->j_commit_callback)
                journal->j_commit_callback(journal, commit_transaction);

        trace_jbd2_end_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);
        if (to_free)
                kfree(commit_transaction);

        wake_up(&journal->j_wait_done_commit);
}