jbd2: Remove data=ordered mode support using jbd buffer heads

From: Jan Kara <jack@suse.cz>
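
Ordered-mode data is now tracked on the transaction's per-inode list
(t_inode_list) and written out at commit time by
journal_submit_inode_data_buffers(), so the old buffer-head based
machinery is no longer needed.  Remove jbd2_journal_dirty_data(), the
BJ_SyncData and BJ_Locked journaling lists, and the t_sync_datalist and
t_locked_list members of transaction_t, together with the commit-time
code that submitted and waited on those lists.

A minimal sketch of the replacement usage (illustrative only, not part
of this patch; it assumes the jbd2_journal_file_inode() helper and
struct jbd2_inode introduced by the companion inode-based ordered-mode
patch in this series):

#include <linux/jbd2.h>

/*
 * Sketch: with the buffer-head lists gone, a filesystem orders an
 * inode's data against the running transaction by filing the inode
 * itself; commit then writes the inode's dirty pages via
 * journal_submit_inode_data_buffers() instead of walking
 * t_sync_datalist.
 */
static int example_order_inode_data(handle_t *handle,
				    struct jbd2_inode *jinode)
{
	/* Assumed helper from the companion patch: links jinode onto
	 * the transaction's t_inode_list if it is not already there. */
	return jbd2_journal_file_inode(handle, jinode);
}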

Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/jbd2/checkpoint.c | 1
 fs/jbd2/commit.c | 221 +-------------------------------------------------
 fs/jbd2/journal.c | 1
 fs/jbd2/transaction.c | 217 +------------------------------------------------
 include/linux/jbd2.h | 29 +-----
 5 files changed, 21 insertions(+), 448 deletions(-)
Index: linux-2.6.26-rc6/fs/jbd2/checkpoint.c
===================================================================
--- linux-2.6.26-rc6.orig/fs/jbd2/checkpoint.c 2008-06-17 10:43:16.000000000 -0700
+++ linux-2.6.26-rc6/fs/jbd2/checkpoint.c 2008-06-17 10:43:37.000000000 -0700
@@ -688,7 +688,6 @@ void __jbd2_journal_drop_transaction(jou
J_ASSERT(transaction->t_state == T_FINISHED);
J_ASSERT(transaction->t_buffers == NULL);
- J_ASSERT(transaction->t_sync_datalist == NULL);
J_ASSERT(transaction->t_forget == NULL);
J_ASSERT(transaction->t_iobuf_list == NULL);
J_ASSERT(transaction->t_shadow_list == NULL);
Index: linux-2.6.26-rc6/fs/jbd2/commit.c
===================================================================
--- linux-2.6.26-rc6.orig/fs/jbd2/commit.c 2008-06-17 10:43:35.000000000 -0700
+++ linux-2.6.26-rc6/fs/jbd2/commit.c 2008-06-17 10:43:37.000000000 -0700
@@ -37,8 +37,8 @@ static void journal_end_buffer_io_sync(s
- * When an ext3-ordered file is truncated, it is possible that many pages are
- * not sucessfully freed, because they are attached to a committing transaction.
+ * When an ext4 file is truncated, it is possible that some pages are not
+ * successfully freed, because they are attached to a committing transaction.
* After the transaction commits, these pages are left on the LRU, with no
* ->mapping, and with attached buffers. These pages are trivially reclaimable
* by the VM, but their apparent absence upsets the VM accounting, and it makes
@@ -80,21 +80,6 @@ nope:
- * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
- * held. For ranking reasons we must trylock. If we lose, schedule away and
- * return 0. j_list_lock is dropped in this case.
- */
-static int inverted_lock(journal_t *journal, struct buffer_head *bh)
- if (!jbd_trylock_bh_state(bh)) {
- spin_unlock(&journal->j_list_lock);
- schedule();
- return 0;
- }
- return 1;
-/*
* Done it all: now submit the commit record. We should have
* cleaned up our previous buffers by now, so if we are in abort
* mode we can now just skip the rest of the journal write
@@ -200,162 +185,6 @@ static int journal_wait_on_commit_record
- * Wait for all submitted IO to complete.
- */
-static int journal_wait_on_locked_list(journal_t *journal,
- transaction_t *commit_transaction)
- int ret = 0;
- struct journal_head *jh;
- while (commit_transaction->t_locked_list) {
- struct buffer_head *bh;
- jh = commit_transaction->t_locked_list->b_tprev;
- bh = jh2bh(jh);
- get_bh(bh);
- if (buffer_locked(bh)) {
- spin_unlock(&journal->j_list_lock);
- wait_on_buffer(bh);
- if (unlikely(!buffer_uptodate(bh)))
- ret = -EIO;
- spin_lock(&journal->j_list_lock);
- }
- if (!inverted_lock(journal, bh)) {
- put_bh(bh);
- spin_lock(&journal->j_list_lock);
- continue;
- }
- if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
- __jbd2_journal_unfile_buffer(jh);
- jbd_unlock_bh_state(bh);
- jbd2_journal_remove_journal_head(bh);
- put_bh(bh);
- } else {
- jbd_unlock_bh_state(bh);
- put_bh(bh);
- cond_resched_lock(&journal->j_list_lock);
- return ret;
-static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
- int i;
- for (i = 0; i < bufs; i++) {
- wbuf[i]->b_end_io = end_buffer_write_sync;
- /* We use-up our safety reference in submit_bh() */
- submit_bh(WRITE, wbuf[i]);
- * Submit all the data buffers to disk
- */
-static void journal_submit_data_buffers(journal_t *journal,
- transaction_t *commit_transaction)
- struct journal_head *jh;
- struct buffer_head *bh;
- int locked;
- int bufs = 0;
- struct buffer_head **wbuf = journal->j_wbuf;
- /*
- * Whenever we unlock the journal and sleep, things can get added
- * onto ->t_sync_datalist, so we have to keep looping back to
- * write_out_data until we *know* that the list is empty.
- * Cleanup any flushed data buffers from the data list. Even in
- * abort mode, we want to flush this out as soon as possible.
- */
-write_out_data:
- cond_resched();
- spin_lock(&journal->j_list_lock);
- while (commit_transaction->t_sync_datalist) {
- jh = commit_transaction->t_sync_datalist;
- bh = jh2bh(jh);
- locked = 0;
- /* Get reference just to make sure buffer does not disappear
- * when we are forced to drop various locks */
- get_bh(bh);
- /* If the buffer is dirty, we need to submit IO and hence
- * we need the buffer lock. We try to lock the buffer without
- * blocking. If we fail, we need to drop j_list_lock and do
- * blocking lock_buffer().
- */
- if (buffer_dirty(bh)) {
- if (test_set_buffer_locked(bh)) {
- BUFFER_TRACE(bh, "needs blocking lock");
- spin_unlock(&journal->j_list_lock);
- /* Write out all data to prevent deadlocks */
- journal_do_submit_data(wbuf, bufs);
- bufs = 0;
- lock_buffer(bh);
- spin_lock(&journal->j_list_lock);
- locked = 1;
- /* We have to get bh_state lock. Again out of order, sigh. */
- if (!inverted_lock(journal, bh)) {
- jbd_lock_bh_state(bh);
- spin_lock(&journal->j_list_lock);
- /* Someone already cleaned up the buffer? */
- if (!buffer_jbd(bh)
- || jh->b_transaction != commit_transaction
- || jh->b_jlist != BJ_SyncData) {
- jbd_unlock_bh_state(bh);
- if (locked)
- unlock_buffer(bh);
- BUFFER_TRACE(bh, "already cleaned up");
- put_bh(bh);
- continue;
- if (locked && test_clear_buffer_dirty(bh)) {
- BUFFER_TRACE(bh, "needs writeout, adding to array");
- wbuf[bufs++] = bh;
- __jbd2_journal_file_buffer(jh, commit_transaction,
- BJ_Locked);
- jbd_unlock_bh_state(bh);
- if (bufs == journal->j_wbufsize) {
- spin_unlock(&journal->j_list_lock);
- journal_do_submit_data(wbuf, bufs);
- bufs = 0;
- goto write_out_data;
- } else if (!locked && buffer_locked(bh)) {
- __jbd2_journal_file_buffer(jh, commit_transaction,
- BJ_Locked);
- jbd_unlock_bh_state(bh);
- put_bh(bh);
- } else {
- BUFFER_TRACE(bh, "writeout complete: unfile");
- __jbd2_journal_unfile_buffer(jh);
- jbd_unlock_bh_state(bh);
- if (locked)
- unlock_buffer(bh);
- jbd2_journal_remove_journal_head(bh);
- /* Once for our safety reference, once for
- * jbd2_journal_remove_journal_head() */
- put_bh(bh);
- put_bh(bh);
- if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
- spin_unlock(&journal->j_list_lock);
- goto write_out_data;
- spin_unlock(&journal->j_list_lock);
- journal_do_submit_data(wbuf, bufs);
* Submit all the data buffers of inode associated with the transaction to
* disk.
@@ -602,42 +431,15 @@ void jbd2_journal_commit_transaction(jou
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
- err = 0;
- journal_submit_data_buffers(journal, commit_transaction);
err = journal_submit_inode_data_buffers(journal, commit_transaction);
if (err)
jbd2_journal_abort(journal, err);
- /*
- * Wait for all previously submitted IO to complete if commit
- * record is to be written synchronously.
- */
- spin_lock(&journal->j_list_lock);
- if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
- JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
- err = journal_wait_on_locked_list(journal,
- commit_transaction);
- spin_unlock(&journal->j_list_lock);
- if (err)
- jbd2_journal_abort(journal, err);
jbd2_journal_write_revoke_records(journal, commit_transaction);
jbd_debug(3, "JBD: commit phase 2\n");
- * If we found any dirty or locked buffers, then we should have
- * looped back up to the write_out_data label. If there weren't
- * any then journal_clean_data_list should have wiped the list
- * clean by now, so check that it is in fact empty.
- */
- J_ASSERT (commit_transaction->t_sync_datalist == NULL);
- jbd_debug (3, "JBD: commit phase 3\n");
- /*
* Way to go: we have now written out all of the data for a
* transaction! Now comes the tricky part: we need to write out
* metadata. Loop over the transaction's entire buffer list:
@@ -655,6 +457,7 @@ void jbd2_journal_commit_transaction(jou
J_ASSERT(commit_transaction->t_nr_buffers <=
commit_transaction->t_outstanding_credits);
+ err = 0;
descriptor = NULL;
bufs = 0;
while (commit_transaction->t_buffers) {
@@ -829,13 +632,6 @@ start_journal_io:
&cbh, crc32_sum);
if (err)
__jbd2_journal_abort_hard(journal);
- spin_lock(&journal->j_list_lock);
- err = journal_wait_on_locked_list(journal,
- commit_transaction);
- spin_unlock(&journal->j_list_lock);
- if (err)
- __jbd2_journal_abort_hard(journal);
@@ -860,7 +656,7 @@ start_journal_io:
so we incur less scheduling load.
- jbd_debug(3, "JBD: commit phase 4\n");
+ jbd_debug(3, "JBD: commit phase 3\n");
* akpm: these are BJ_IO, and j_list_lock is not needed.
@@ -919,7 +715,7 @@ wait_for_iobuf:
J_ASSERT (commit_transaction->t_shadow_list == NULL);
- jbd_debug(3, "JBD: commit phase 5\n");
+ jbd_debug(3, "JBD: commit phase 4\n");
/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
@@ -946,7 +742,7 @@ wait_for_iobuf:
/* AKPM: bforget here */
- jbd_debug(3, "JBD: commit phase 6\n");
+ jbd_debug(3, "JBD: commit phase 5\n");
if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
@@ -966,9 +762,8 @@ wait_for_iobuf:
transaction can be removed from any checkpoint list it was on
before. */
- jbd_debug(3, "JBD: commit phase 7\n");
+ jbd_debug(3, "JBD: commit phase 6\n");
- J_ASSERT(commit_transaction->t_sync_datalist == NULL);
J_ASSERT(list_empty(&commit_transaction->t_inode_list));
J_ASSERT(commit_transaction->t_buffers == NULL);
J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
@@ -1090,7 +885,7 @@ restart_loop:
/* Done with this transaction! */
- jbd_debug(3, "JBD: commit phase 8\n");
+ jbd_debug(3, "JBD: commit phase 7\n");
J_ASSERT(commit_transaction->t_state == T_COMMIT);
Index: linux-2.6.26-rc6/fs/jbd2/journal.c
===================================================================
--- linux-2.6.26-rc6.orig/fs/jbd2/journal.c 2008-06-17 10:43:35.000000000 -0700
+++ linux-2.6.26-rc6/fs/jbd2/journal.c 2008-06-17 10:43:37.000000000 -0700
@@ -50,7 +50,6 @@ EXPORT_SYMBOL(jbd2_journal_unlock_update
EXPORT_SYMBOL(jbd2_journal_get_write_access);
EXPORT_SYMBOL(jbd2_journal_get_create_access);
EXPORT_SYMBOL(jbd2_journal_get_undo_access);
-EXPORT_SYMBOL(jbd2_journal_dirty_data);
EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
EXPORT_SYMBOL(jbd2_journal_release_buffer);
EXPORT_SYMBOL(jbd2_journal_forget);
Index: linux-2.6.26-rc6/fs/jbd2/transaction.c
===================================================================
--- linux-2.6.26-rc6.orig/fs/jbd2/transaction.c 2008-06-17 10:43:35.000000000 -0700
+++ linux-2.6.26-rc6/fs/jbd2/transaction.c 2008-06-17 10:43:37.000000000 -0700
@@ -943,183 +943,6 @@ out:
- * int jbd2_journal_dirty_data() - mark a buffer as containing dirty data which
- * needs to be flushed before we can commit the
- * current transaction.
- * @handle: transaction
- * @bh: bufferhead to mark
- * The buffer is placed on the transaction's data list and is marked as
- * belonging to the transaction.
- * Returns error number or 0 on success.
- * jbd2_journal_dirty_data() can be called via page_launder->ext3_writepage
- * by kswapd.
- */
-int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
- journal_t *journal = handle->h_transaction->t_journal;
- int need_brelse = 0;
- struct journal_head *jh;
- if (is_handle_aborted(handle))
- return 0;
- jh = jbd2_journal_add_journal_head(bh);
- JBUFFER_TRACE(jh, "entry");
- /*
- * The buffer could *already* be dirty. Writeout can start
- * at any time.
- */
- jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);
- /*
- * What if the buffer is already part of a running transaction?
- * There are two cases:
- * 1) It is part of the current running transaction. Refile it,
- * just in case we have allocated it as metadata, deallocated
- * it, then reallocated it as data.
- * 2) It is part of the previous, still-committing transaction.
- * If all we want to do is to guarantee that the buffer will be
- * written to disk before this new transaction commits, then
- * being sure that the *previous* transaction has this same
- * property is sufficient for us! Just leave it on its old
- * transaction.
- * In case (2), the buffer must not already exist as metadata
- * --- that would violate write ordering (a transaction is free
- * to write its data at any point, even before the previous
- * committing transaction has committed). The caller must
- * never, ever allow this to happen: there's nothing we can do
- * about it in this layer.
- */
- jbd_lock_bh_state(bh);
- spin_lock(&journal->j_list_lock);
- /* Now that we have bh_state locked, are we really still mapped? */
- if (!buffer_mapped(bh)) {
- JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
- goto no_journal;
- if (jh->b_transaction) {
- JBUFFER_TRACE(jh, "has transaction");
- if (jh->b_transaction != handle->h_transaction) {
- JBUFFER_TRACE(jh, "belongs to older transaction");
- J_ASSERT_JH(jh, jh->b_transaction ==
- journal->j_committing_transaction);
- /* @@@ IS THIS TRUE ? */
- /*
- * Not any more. Scenario: someone does a write()
- * in data=journal mode. The buffer's transaction has
- * moved into commit. Then someone does another
- * write() to the file. We do the frozen data copyout
- * and set b_next_transaction to point to j_running_t.
- * And while we're in that state, someone does a
- * writepage() in an attempt to pageout the same area
- * of the file via a shared mapping. At present that
- * calls jbd2_journal_dirty_data(), and we get right here.
- * It may be too late to journal the data. Simply
- * falling through to the next test will suffice: the
- * data will be dirty and wil be checkpointed. The
- * ordering comments in the next comment block still
- * apply.
- */
- //J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
- /*
- * If we're journalling data, and this buffer was
- * subject to a write(), it could be metadata, forget
- * or shadow against the committing transaction. Now,
- * someone has dirtied the same darn page via a mapping
- * and it is being writepage()'d.
- * We *could* just steal the page from commit, with some
- * fancy locking there. Instead, we just skip it -
- * don't tie the page's buffers to the new transaction
- * at all.
- * Implication: if we crash before the writepage() data
- * is written into the filesystem, recovery will replay
- * the write() data.
- */
- if (jh->b_jlist != BJ_None &&
- jh->b_jlist != BJ_SyncData &&
- jh->b_jlist != BJ_Locked) {
- JBUFFER_TRACE(jh, "Not stealing");
- goto no_journal;
- /*
- * This buffer may be undergoing writeout in commit. We
- * can't return from here and let the caller dirty it
- * again because that can cause the write-out loop in
- * commit to never terminate.
- */
- if (buffer_dirty(bh)) {
- get_bh(bh);
- spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
- need_brelse = 1;
- sync_dirty_buffer(bh);
- jbd_lock_bh_state(bh);
- spin_lock(&journal->j_list_lock);
- /* Since we dropped the lock... */
- if (!buffer_mapped(bh)) {
- JBUFFER_TRACE(jh, "buffer got unmapped");
- goto no_journal;
- /* The buffer may become locked again at any
- time if it is redirtied */
- /* journal_clean_data_list() may have got there first */
- if (jh->b_transaction != NULL) {
- JBUFFER_TRACE(jh, "unfile from commit");
- __jbd2_journal_temp_unlink_buffer(jh);
- /* It still points to the committing
- * transaction; move it to this one so
- * that the refile assert checks are
- * happy. */
- jh->b_transaction = handle->h_transaction;
- /* The buffer will be refiled below */
- /*
- * Special case --- the buffer might actually have been
- * allocated and then immediately deallocated in the previous,
- * committing transaction, so might still be left on that
- * transaction's metadata lists.
- */
- if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
- JBUFFER_TRACE(jh, "not on correct data list: unfile");
- J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
- __jbd2_journal_temp_unlink_buffer(jh);
- jh->b_transaction = handle->h_transaction;
- JBUFFER_TRACE(jh, "file as data");
- __jbd2_journal_file_buffer(jh, handle->h_transaction,
- BJ_SyncData);
- } else {
- JBUFFER_TRACE(jh, "not on a transaction");
- __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
-no_journal:
- spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
- if (need_brelse) {
- BUFFER_TRACE(bh, "brelse");
- __brelse(bh);
- JBUFFER_TRACE(jh, "exit");
- jbd2_journal_put_journal_head(jh);
- return 0;
-/**
* int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
* @handle: transaction to add buffer to.
* @bh: buffer to mark
@@ -1541,10 +1364,10 @@ __blist_del_buffer(struct journal_head *
* Remove a buffer from the appropriate transaction list.
* Note that this function can *change* the value of
- * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
- * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list. If the caller
- * is holding onto a copy of one of thee pointers, it could go bad.
- * Generally the caller needs to re-read the pointer from the transaction_t.
+ * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
+ * t_log_list or t_reserved_list. If the caller is holding onto a copy of one
+ * of these pointers, it could go bad. Generally the caller needs to re-read
+ * the pointer from the transaction_t.
* Called under j_list_lock. The journal may not be locked.
@@ -1566,9 +1389,6 @@ void __jbd2_journal_temp_unlink_buffer(s
switch (jh->b_jlist) {
case BJ_None:
return;
- case BJ_SyncData:
- list = &transaction->t_sync_datalist;
- break;
case BJ_Metadata:
transaction->t_nr_buffers--;
J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
@@ -1589,9 +1409,6 @@ void __jbd2_journal_temp_unlink_buffer(s
case BJ_Reserved:
list = &transaction->t_reserved_list;
break;
- case BJ_Locked:
- list = &transaction->t_locked_list;
- break;
__blist_del_buffer(list, jh);
@@ -1634,15 +1451,7 @@ __journal_try_to_free_buffer(journal_t *
goto out;
spin_lock(&journal->j_list_lock);
- if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
- if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
- /* A written-back ordered data buffer */
- JBUFFER_TRACE(jh, "release data");
- __jbd2_journal_unfile_buffer(jh);
- jbd2_journal_remove_journal_head(bh);
- __brelse(bh);
- } else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
+ if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
/* written-back checkpointed metadata buffer */
if (jh->b_jlist == BJ_None) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
@@ -1878,6 +1687,7 @@ static int journal_unmap_buffer(journal_
if (!buffer_jbd(bh))
goto zap_buffer_unlocked;
+ /* OK, we have data buffer in journaled mode */
spin_lock(&journal->j_state_lock);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
@@ -1941,15 +1751,6 @@ static int journal_unmap_buffer(journal_
} else if (transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "on committing transaction");
- if (jh->b_jlist == BJ_Locked) {
- /*
- * The buffer is on the committing transaction's locked
- * list. We have the buffer locked, so I/O has
- * completed. So we can nail the buffer now.
- */
- may_free = __dispose_buffer(jh, transaction);
- goto zap_buffer;
* If it is committing, we simply cannot touch it. We
* can remove it's next_transaction pointer from the
@@ -2082,9 +1883,6 @@ void __jbd2_journal_file_buffer(struct j
J_ASSERT_JH(jh, !jh->b_committed_data);
J_ASSERT_JH(jh, !jh->b_frozen_data);
return;
- case BJ_SyncData:
- list = &transaction->t_sync_datalist;
- break;
case BJ_Metadata:
transaction->t_nr_buffers++;
list = &transaction->t_buffers;
@@ -2104,9 +1902,6 @@ void __jbd2_journal_file_buffer(struct j
case BJ_Reserved:
list = &transaction->t_reserved_list;
break;
- case BJ_Locked:
- list = &transaction->t_locked_list;
- break;
__blist_add_buffer(list, jh);
Index: linux-2.6.26-rc6/include/linux/jbd2.h
===================================================================
--- linux-2.6.26-rc6.orig/include/linux/jbd2.h 2008-06-17 10:43:35.000000000 -0700
+++ linux-2.6.26-rc6/include/linux/jbd2.h 2008-06-17 10:43:37.000000000 -0700
@@ -543,24 +543,12 @@ struct transaction_s
struct journal_head *t_reserved_list;
- * Doubly-linked circular list of all buffers under writeout during
- * commit [j_list_lock]
- */
- struct journal_head *t_locked_list;
- /*
* Doubly-linked circular list of all metadata buffers owned by this
* transaction [j_list_lock]
struct journal_head *t_buffers;
- * Doubly-linked circular list of all data buffers still to be
- * flushed before this transaction can be committed [j_list_lock]
- */
- struct journal_head *t_sync_datalist;
- /*
* Doubly-linked circular list of all forget buffers (superseded
* buffers which we can un-checkpoint once this transaction commits)
* [j_list_lock]
@@ -1044,7 +1032,6 @@ extern int jbd2_journal_extend (handle_
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
@@ -1223,15 +1210,13 @@ static inline int jbd_space_needed(journ
/* journaling buffer types */
#define BJ_None 0 /* Not journaled */
-#define BJ_SyncData 1 /* Normal data: flush before commit */
-#define BJ_Metadata 2 /* Normal journaled metadata */
-#define BJ_Forget 3 /* Buffer superseded by this transaction */
-#define BJ_IO 4 /* Buffer is for temporary IO use */
-#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
-#define BJ_LogCtl 6 /* Buffer contains log descriptors */
-#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
-#define BJ_Locked 8 /* Locked for I/O during commit */
-#define BJ_Types 9
+#define BJ_Metadata 1 /* Normal journaled metadata */
+#define BJ_Forget 2 /* Buffer superseded by this transaction */
+#define BJ_IO 3 /* Buffer is for temporary IO use */
+#define BJ_Shadow 4 /* Buffer contents being shadowed to the log */
+#define BJ_LogCtl 5 /* Buffer contains log descriptors */
+#define BJ_Reserved 6 /* Buffer is reserved for access by journal */
+#define BJ_Types 7
extern int jbd_blocks_per_page(struct inode *inode);