1 jbd2: fold __process_buffer() into jbd2_log_do_checkpoint()
3 __process_buffer() is only called by jbd2_log_do_checkpoint(), and it
4 had a very complex locking protocol where it would be called with the
5 j_list_lock, and sometimes exit with the lock held (if the return code
6 was 0), or release the lock.
8 This was confusing both to humans and to smatch (which erroneously
9 complained that the lock was taken twice).
11 Folding __process_buffer() to the caller allows us to simplify the
12 control flow, making the resulting function easier to read and reason
13 about, and dropping the compiled size of fs/jbd2/checkpoint.c by 150
14 bytes (over 4% of the text size).
16 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
17 Reviewed-by: Jan Kara <jack@suse.cz>
19 fs/jbd2/checkpoint.c | 195 ++++++++++++++++++++++-----------------------------
20 1 file changed, 84 insertions(+), 111 deletions(-)
22 diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
23 index 7f34f47..993a187 100644
24 --- a/fs/jbd2/checkpoint.c
25 +++ b/fs/jbd2/checkpoint.c
26 @@ -255,81 +255,6 @@ __flush_batch(journal_t *journal, int *batch_count)
30 - * Try to flush one buffer from the checkpoint list to disk.
32 - * Return 1 if something happened which requires us to abort the current
33 - * scan of the checkpoint list. Return <0 if the buffer has failed to
36 - * Called with j_list_lock held and drops it if 1 is returned
38 -static int __process_buffer(journal_t *journal, struct journal_head *jh,
39 - int *batch_count, transaction_t *transaction)
41 - struct buffer_head *bh = jh2bh(jh);
44 - if (buffer_locked(bh)) {
46 - spin_unlock(&journal->j_list_lock);
48 - /* the journal_head may have gone by now */
49 - BUFFER_TRACE(bh, "brelse");
52 - } else if (jh->b_transaction != NULL) {
53 - transaction_t *t = jh->b_transaction;
54 - tid_t tid = t->t_tid;
56 - transaction->t_chp_stats.cs_forced_to_close++;
57 - spin_unlock(&journal->j_list_lock);
58 - if (unlikely(journal->j_flags & JBD2_UNMOUNT))
60 - * The journal thread is dead; so starting and
61 - * waiting for a commit to finish will cause
62 - * us to wait for a _very_ long time.
64 - printk(KERN_ERR "JBD2: %s: "
65 - "Waiting for Godot: block %llu\n",
67 - (unsigned long long) bh->b_blocknr);
68 - jbd2_log_start_commit(journal, tid);
69 - jbd2_log_wait_commit(journal, tid);
71 - } else if (!buffer_dirty(bh)) {
73 - if (unlikely(buffer_write_io_error(bh)))
76 - BUFFER_TRACE(bh, "remove from checkpoint");
77 - __jbd2_journal_remove_checkpoint(jh);
78 - spin_unlock(&journal->j_list_lock);
82 - * Important: we are about to write the buffer, and
83 - * possibly block, while still holding the journal lock.
84 - * We cannot afford to let the transaction logic start
85 - * messing around with this buffer before we write it to
86 - * disk, as that would break recoverability.
88 - BUFFER_TRACE(bh, "queue");
90 - J_ASSERT_BH(bh, !buffer_jwrite(bh));
91 - journal->j_chkpt_bhs[*batch_count] = bh;
92 - __buffer_relink_io(jh);
93 - transaction->t_chp_stats.cs_written++;
95 - if (*batch_count == JBD2_NR_BATCH) {
96 - spin_unlock(&journal->j_list_lock);
97 - __flush_batch(journal, batch_count);
105 * Perform an actual checkpoint. We take the first transaction on the
106 * list of transactions to be checkpointed and send all its buffers
107 * to disk. We submit larger chunks of data at once.
108 @@ -339,9 +264,11 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
110 int jbd2_log_do_checkpoint(journal_t *journal)
112 - transaction_t *transaction;
115 + struct journal_head *jh;
116 + struct buffer_head *bh;
117 + transaction_t *transaction;
119 + int err, result, batch_count = 0;
121 jbd_debug(1, "Start checkpoint\n");
123 @@ -374,46 +301,92 @@ restart:
124 * done (maybe it's a new transaction, but it fell at the same
127 - if (journal->j_checkpoint_transactions == transaction &&
128 - transaction->t_tid == this_tid) {
129 - int batch_count = 0;
130 - struct journal_head *jh;
131 - int retry = 0, err;
133 - while (!retry && transaction->t_checkpoint_list) {
134 - jh = transaction->t_checkpoint_list;
135 - retry = __process_buffer(journal, jh, &batch_count,
137 - if (retry < 0 && !result)
139 - if (!retry && (need_resched() ||
140 - spin_needbreak(&journal->j_list_lock))) {
141 - spin_unlock(&journal->j_list_lock);
146 + if (journal->j_checkpoint_transactions != transaction ||
147 + transaction->t_tid != this_tid)
152 - spin_unlock(&journal->j_list_lock);
155 - __flush_batch(journal, &batch_count);
156 + /* checkpoint all of the transaction's buffers */
157 + while (transaction->t_checkpoint_list) {
158 + jh = transaction->t_checkpoint_list;
161 + if (buffer_locked(bh)) {
163 + spin_unlock(&journal->j_list_lock);
164 + wait_on_buffer(bh);
165 + /* the journal_head may have gone by now */
166 + BUFFER_TRACE(bh, "brelse");
170 + if (jh->b_transaction != NULL) {
171 + transaction_t *t = jh->b_transaction;
172 + tid_t tid = t->t_tid;
175 - spin_lock(&journal->j_list_lock);
177 + transaction->t_chp_stats.cs_forced_to_close++;
178 + spin_unlock(&journal->j_list_lock);
179 + if (unlikely(journal->j_flags & JBD2_UNMOUNT))
181 + * The journal thread is dead; so
182 + * starting and waiting for a commit
183 + * to finish will cause us to wait for
184 + * a _very_ long time.
187 + "JBD2: %s: Waiting for Godot: block %llu\n",
188 + journal->j_devname, (unsigned long long) bh->b_blocknr);
190 + jbd2_log_start_commit(journal, tid);
191 + jbd2_log_wait_commit(journal, tid);
194 + if (!buffer_dirty(bh)) {
195 + if (unlikely(buffer_write_io_error(bh)) && !result)
198 + BUFFER_TRACE(bh, "remove from checkpoint");
199 + __jbd2_journal_remove_checkpoint(jh);
200 + spin_unlock(&journal->j_list_lock);
205 - * Now we have cleaned up the first transaction's checkpoint
206 - * list. Let's clean up the second one
207 + * Important: we are about to write the buffer, and
208 + * possibly block, while still holding the journal
209 + * lock. We cannot afford to let the transaction
210 + * logic start messing around with this buffer before
211 + * we write it to disk, as that would break
214 - err = __wait_cp_io(journal, transaction);
217 + BUFFER_TRACE(bh, "queue");
219 + J_ASSERT_BH(bh, !buffer_jwrite(bh));
220 + journal->j_chkpt_bhs[batch_count++] = bh;
221 + __buffer_relink_io(jh);
222 + transaction->t_chp_stats.cs_written++;
223 + if ((batch_count == JBD2_NR_BATCH) ||
225 + spin_needbreak(&journal->j_list_lock))
226 + goto unlock_and_flush;
231 + spin_unlock(&journal->j_list_lock);
234 + __flush_batch(journal, &batch_count);
235 + spin_lock(&journal->j_list_lock);
240 + * Now we issued all of the transaction's buffers, let's deal
241 + * with the buffers that are out for I/O.
243 + err = __wait_cp_io(journal, transaction);
247 spin_unlock(&journal->j_list_lock);