 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
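
/*
 * glock_lo_add - queue a glock on the current transaction's log list
 *
 * The glock is expected to be held exclusively; the element is flagged
 * dirty and linked onto sd_log_le_gl so that glock_lo_after_commit()
 * can take it off that list again once the transaction has committed.
 */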

static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;

	gfs2_log_lock(sdp);
	if (!list_empty(&le->le_list)) {
		gfs2_log_unlock(sdp);
		return;
	}

	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}

static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	gfs2_log_lock(sdp);
	if (!list_empty(&bd->bd_list_tr)) {
		gfs2_log_unlock(sdp);
		return;
	}
	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);
	gfs2_log_unlock(sdp);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);

	tr->tr_num_buf_new++;
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_log_unlock(sdp);
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}
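
/*
 * buf_lo_before_commit - write pinned metadata buffers into the log
 *
 * Emits one or more GFS2_LOG_DESC_METADATA descriptor blocks, each
 * listing the block numbers of up to buf_limit(sdp) buffers, and then
 * writes the buffers themselves to the log via gfs2_log_fake_buf().
 */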

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = BUF_OFFSET;
	unsigned int limit;
	unsigned int num;
	unsigned int n;
	__be64 *ptr;

	limit = buf_limit(sdp);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;

		bh = gfs2_log_get_buf(sdp);
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}
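
/*
 * buf_lo_scan_elements - replay metadata blocks during journal recovery
 *
 * Runs in pass 1.  For each block number recorded in a metadata
 * descriptor, the logged copy is read and, unless the block has been
 * revoked, copied over the in-place block and marked dirty.
 */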

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}
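
/*
 * revoke_lo_before_commit - write the queued revokes into the log
 *
 * Packs the 64-bit block numbers of all pending revokes into log
 * blocks, starting with a GFS2_LOG_DESC_REVOKE descriptor and
 * continuing with GFS2_METATYPE_LB blocks when one block fills up.
 */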

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(u64)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;

			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);

	gfs2_log_lock(sdp);
	if (!list_empty(&le->le_list)) {
		gfs2_log_unlock(sdp);
		return;
	}

	gfs2_rgrp_bh_hold(rgd);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
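
/*
 * For illustration only (the exact layout is implied by
 * databuf_lo_before_commit() below): a journaled-data descriptor block
 * looks roughly like
 *
 *	struct gfs2_log_descriptor ld;	ld_type == GFS2_LOG_DESC_JDATA
 *	__be64 tag[2 * n];		{ block number, escape flag } pairs
 *
 * so each descriptor covers about half as many blocks as a metadata
 * descriptor, whose tags are a single __be64 block number each.
 */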

static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	gfs2_log_lock(sdp);
	if (!list_empty(&bd->bd_list_tr)) {
		gfs2_log_unlock(sdp);
		return;
	}
	tr->tr_touched = 1;
	if (gfs2_is_jdata(ip)) {
		tr->tr_num_buf++;
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
	}
	gfs2_log_unlock(sdp);
	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);
	if (gfs2_is_jdata(ip)) {
		sdp->sd_log_num_jdata++;
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
	}
	sdp->sd_log_num_databuf++;
	gfs2_log_lock(sdp);
	list_add(&le->le_list, &sdp->sd_log_le_databuf);
	gfs2_log_unlock(sdp);
}

static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(kaddr, KM_USER0);

	return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
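
/*
 * A note on escaping: gfs2_check_magic() above reports whether the first
 * word of a data block happens to equal GFS2_MAGIC.  If it does, the copy
 * written to the log gets that word zeroed and the tag's escape flag set,
 * and databuf_lo_scan_elements() restores the magic number during replay,
 * so that a data block in the log is never mistaken for a metadata header.
 */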

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL, *bh1 = NULL;
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	limit = databuf_limit(sdp);

	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while (total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* store off the buffer head in a local ptr since
			 * gfs2_bufdata might change when we drop the log lock
			 */
			bh1 = bd1->bd_bh;

			/* An ordered write buffer */
			if (bh1 && !buffer_pinned(bh1)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				if (bh1) {
					if (buffer_dirty(bh1)) {
						get_bh(bh1);

						gfs2_log_unlock(sdp);

						ll_rw_block(SWRITE, 1, &bh1);
						brelse(bh1);

						gfs2_log_lock(sdp);
					}
					continue;
				}
				continue;
			} else if (bh1) { /* A journaled buffer */
				int magic;
				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					ld = (struct gfs2_log_descriptor *)
								bh->b_data;
					ptr = (__be64 *)(bh->b_data +
							 DATABUF_OFFSET);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be32(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be32(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0,
					       sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bh1);
				*ptr++ = cpu_to_be64(bh1->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bh1);
				if (unlikely(magic != 0))
					set_buffer_escaped(bh1);
				gfs2_log_lock(sdp);
				if (++n >= num)
					break;
			} else if (!bh1) {
				total_dbuf--;
				sdp->sd_log_num_databuf--;
				list_del_init(&bd1->bd_le.le_list);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				kmem_cache_free(gfs2_bufdata_cachep, bd1);
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_mapped(bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}
		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(kaddr, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else {
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			}
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del_init(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		bh = bd1->bd_bh;
		if (bh) {
			bh->b_private = NULL;
			get_bh(bh);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kmem_cache_free(gfs2_bufdata_cachep, bd1);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}
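
/*
 * databuf_lo_scan_elements - replay journaled data blocks during recovery
 *
 * Like buf_lo_scan_elements(), but each tag carries a second __be64
 * escape flag; when it is set, the first word of the replayed block is
 * put back to GFS2_MAGIC before the block is written out.
 */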

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}
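
/*
 * Each log element type exports its callbacks through one of the
 * gfs2_log_operations tables below; the log and recovery code calls
 * them at the matching stages of transaction commit and journal replay.
 */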

const struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock",
};

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {