/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/crc32.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/lm_interface.h>
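
/*
 * Tables of older on-disk format numbers that this code knows how to
 * upgrade from.  Each table is zero-terminated so gfs2_check_sb() can
 * scan it with a simple sentinel loop.
 */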
static const u32 gfs2_old_fs_formats[] = {
	0
};

static const u32 gfs2_old_multihost_formats[] = {
	0
};

/**
 * gfs2_tune_init - Fill a gfs2_tune structure with default values
 * @gt: the gfs2_tune structure to initialise
 */

void gfs2_tune_init(struct gfs2_tune *gt)
{
	spin_lock_init(&gt->gt_spin);

	gt->gt_ilimit_tries = 3;
	gt->gt_ilimit_min = 1;
	gt->gt_demote_secs = 300;
	gt->gt_incore_log_blocks = 1024;
	gt->gt_log_flush_secs = 60;
	gt->gt_jindex_refresh_secs = 60;
	gt->gt_scand_secs = 15;
	gt->gt_recoverd_secs = 60;
	gt->gt_quotad_secs = 5;
	gt->gt_quota_simul_sync = 64;
	gt->gt_quota_warn_period = 10;
	gt->gt_quota_scale_num = 1;
	gt->gt_quota_scale_den = 1;
	gt->gt_quota_cache_secs = 300;
	gt->gt_quota_quantum = 60;
	gt->gt_atime_quantum = 3600;
	gt->gt_new_files_jdata = 0;
	gt->gt_new_files_directio = 0;
	gt->gt_max_readahead = 1 << 18;
	gt->gt_lockdump_size = 131072;
	gt->gt_stall_secs = 600;
	gt->gt_complain_secs = 10;
	gt->gt_reclaim_limit = 5000;
	gt->gt_statfs_quantum = 30;
	gt->gt_statfs_slow = 0;
}

/**
 * gfs2_check_sb - Check superblock
 * @sdp: the filesystem
 * @sb: The superblock
 * @silent: Don't print a message if the check fails
 *
 * Checks the version code of the FS is one that we understand how to
 * read and that the sizes of the various on-disk structures have not
 * changed.
 */

int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
{
	unsigned int x;

	if (sb->sb_magic != GFS2_MAGIC ||
	    sb->sb_type != GFS2_METATYPE_SB) {
		if (!silent)
			printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n");
		return -EINVAL;
	}

	/* If format numbers match exactly, we're done. */

	if (sb->sb_fs_format == GFS2_FORMAT_FS &&
	    sb->sb_multihost_format == GFS2_FORMAT_MULTI)
		return 0;

	if (sb->sb_fs_format != GFS2_FORMAT_FS) {
		for (x = 0; gfs2_old_fs_formats[x]; x++)
			if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
				break;

		if (!gfs2_old_fs_formats[x]) {
			printk(KERN_WARNING
			       "GFS2: code version (%u, %u) is incompatible "
			       "with ondisk format (%u, %u)\n",
			       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
			       sb->sb_fs_format, sb->sb_multihost_format);
			printk(KERN_WARNING
			       "GFS2: I don't know how to upgrade this FS\n");
			return -EINVAL;
		}
	}

	if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
		for (x = 0; gfs2_old_multihost_formats[x]; x++)
			if (gfs2_old_multihost_formats[x] ==
			    sb->sb_multihost_format)
				break;

		if (!gfs2_old_multihost_formats[x]) {
			printk(KERN_WARNING
			       "GFS2: code version (%u, %u) is incompatible "
			       "with ondisk format (%u, %u)\n",
			       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
			       sb->sb_fs_format, sb->sb_multihost_format);
			printk(KERN_WARNING
			       "GFS2: I don't know how to upgrade this FS\n");
			return -EINVAL;
		}
	}

	if (!sdp->sd_args.ar_upgrade) {
		printk(KERN_WARNING
		       "GFS2: code version (%u, %u) is incompatible "
		       "with ondisk format (%u, %u)\n",
		       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
		       sb->sb_fs_format, sb->sb_multihost_format);
		printk(KERN_INFO
		       "GFS2: Use the \"upgrade\" mount option to upgrade "
		       "the FS\n");
		printk(KERN_INFO "GFS2: See the manual for more details\n");
		return -EINVAL;
	}

	return 0;
}
static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error)
{
	struct page *page = bio->bi_private;

	if (!error)
		SetPageUptodate(page);
	else
		printk(KERN_WARNING "gfs2: error %d reading superblock\n", error);
	unlock_page(page);
	return 0;
}
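
/*
 * Convert the on-disk superblock (big-endian) into the host-endian in-core
 * copy held in the superblock descriptor.
 */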
static void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
{
	const struct gfs2_sb *str = buf;

	sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
	sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
	sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
	sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
	sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
	sb->sb_bsize = be32_to_cpu(str->sb_bsize);
	sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
	sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
	sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
	sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
	sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);

	memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
	memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
}

/**
 * gfs2_read_super - Read the gfs2 super block from disk
 * @sdp: The GFS2 super block
 * @sector: The location of the super block
 *
 * This uses the bio functions to read the super block from disk
 * because we want to be 100% sure that we never read cached data.
 * A super block is read twice only during each GFS2 mount and is
 * never written to by the filesystem. The first time it's read no
 * locks are held, and the only details which are looked at are those
 * relating to the locking protocol. Once locking is up and working,
 * the sb is read again under the lock to establish the location of
 * the master directory (contains pointers to journals etc) and the
 * root directory.
 *
 * Returns: 0 on success or error
 */

int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
{
	struct super_block *sb = sdp->sd_vfs;
	struct page *page;
	struct bio *bio;
	void *p;

	page = alloc_page(GFP_KERNEL);
	if (unlikely(!page))
		return -ENOBUFS;

	ClearPageUptodate(page);
	ClearPageDirty(page);
	lock_page(page);

	bio = bio_alloc(GFP_KERNEL, 1);
	if (unlikely(!bio)) {
		__free_page(page);
		return -ENOBUFS;
	}
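
	/*
	 * The sector argument counts filesystem-sized blocks; the bio layer
	 * works in 512-byte sectors.  With a 4096-byte s_blocksize, for
	 * example, s_blocksize >> 9 is 8, so fs block N starts at 512-byte
	 * sector N * 8.
	 */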
	bio->bi_sector = sector * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	bio->bi_end_io = end_bio_io_page;
	bio->bi_private = page;
	submit_bio(READ_SYNC | (1 << BIO_RW_META), bio);
	wait_on_page_locked(page);
	bio_put(bio);
	if (!PageUptodate(page)) {
		__free_page(page);
		return -EIO;
	}

	p = kmap(page);
	gfs2_sb_in(&sdp->sd_sb, p);
	kunmap(page);
	__free_page(page);
	return 0;
}

/**
 * gfs2_read_sb - Read super block
 * @sdp: The GFS2 superblock
 * @gl: the glock for the superblock (assumed to be held)
 * @silent: Don't print message if mount fails
 */

int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
{
	u32 hash_blocks, ind_blocks, leaf_blocks;
	u32 tmp_blocks;
	unsigned int x;
	int error;

	error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
	if (error) {
		if (!silent)
			fs_err(sdp, "can't read superblock\n");
		return error;
	}

	error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
	if (error)
		return error;
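
	/*
	 * sd_fsb2bb_shift converts filesystem blocks to 512-byte basic
	 * blocks: for example, with a 4096-byte block, sb_bsize_shift is 12
	 * and GFS2_BASIC_BLOCK_SHIFT is 9, so the shift is 3 and sd_fsb2bb
	 * is 8 basic blocks per filesystem block.
	 */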
	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
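	/*
	 * sd_diptrs and sd_inptrs are the number of block pointers that fit
	 * in a dinode and in an indirect block respectively: the block size
	 * minus the relevant header, divided by the size of a 64-bit pointer.
	 */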
	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_dinode)) / sizeof(u64);
	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) /
			       sizeof(struct gfs2_quota_change);

	/* Compute maximum reservation required to add an entry to a directory */

	hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),
				   sdp->sd_jbsize);

	ind_blocks = 0;
	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
		ind_blocks += tmp_blocks;
	}

	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
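
	/*
	 * sd_max_dirres is the worst-case block reservation for one directory
	 * insert: a fully grown hash table, the indirect blocks needed to
	 * address it, and the leaf blocks a single split chain can touch.
	 */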

	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_dinode);
	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
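
	/*
	 * sd_heightsize[h] is the largest file size (in bytes) that a
	 * metadata tree of height h can address: height 0 is data stuffed
	 * in the dinode, and each extra level multiplies the reach by
	 * sd_inptrs.  The loop stops once the multiplication would overflow.
	 */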
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_heightsize[x - 1] || m)
			break;
		sdp->sd_heightsize[x] = space;
	}
	sdp->sd_max_height = x;
	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);

	sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
				 sizeof(struct gfs2_dinode);
	sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_jheightsize[x - 1] || m)
			break;
		sdp->sd_jheightsize[x] = space;
	}
	sdp->sd_max_jheight = x;
	gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);

	return 0;
}

/**
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 *
 * This is very similar to the gfs2_rindex_hold() function, except that
 * in general we hold the jindex lock for longer periods of time and
 * we grab it far less frequently (in general) than the rgrp lock.
 */

int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
	struct qstr name;
	char buf[20];
	struct gfs2_jdesc *jd;
	int error;

	name.name = buf;

	mutex_lock(&sdp->sd_jindex_mutex);
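
	/*
	 * Discover the journals by looking up "journal0", "journal1", ...
	 * in the jindex directory until a name is not found; each hit gets
	 * a gfs2_jdesc added to sd_jindex_list.
	 */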
	for (;;) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
		if (error)
			break;

		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
		name.hash = gfs2_disk_hash(name.name, name.len);

		error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
		if (error == -ENOENT) {
			error = 0;
			break;
		}

		gfs2_glock_dq_uninit(ji_gh);

		if (error)
			break;

		error = -ENOMEM;
		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
		if (!jd)
			break;

		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1, NULL);
		if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
			if (!jd->jd_inode)
				error = -ENOENT;
			else
				error = PTR_ERR(jd->jd_inode);
			kfree(jd);
			break;
		}

		spin_lock(&sdp->sd_jindex_spin);
		jd->jd_jid = sdp->sd_journals++;
		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
		spin_unlock(&sdp->sd_jindex_spin);
	}

	mutex_unlock(&sdp->sd_jindex_mutex);

	return error;
}

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;
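
	/*
	 * Splice the whole jindex list onto a private list head under the
	 * spinlock, so the entries can be torn down afterwards without
	 * holding sd_jindex_spin.
	 */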
	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	if (jd)
		jd->jd_dirty = 1;
	spin_unlock(&sdp->sd_jindex_spin);
}

struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	spin_lock(&sdp->sd_jindex_spin);

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_dirty) {
			jd->jd_dirty = 0;
			found = 1;
			break;
		}
	}

	spin_unlock(&sdp->sd_jindex_spin);

	if (!found)
		jd = NULL;

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	int ar;
	int error;

	if (ip->i_di.di_size < (8 << 20) || ip->i_di.di_size > (1 << 30) ||
	    (ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	jd->jd_blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;

	error = gfs2_write_alloc_required(ip, 0, ip->i_di.di_size, &ar);
	if (!error && ar) {
		gfs2_consist_inode(ip);
		error = -EIO;
	}

	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder t_gh;
	struct gfs2_log_header_host head;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
	if (error)
		return error;

	gfs2_meta_cache_flush(ip);
	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
	if (error)
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialise the head-of-log state */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&t_gh);

	return 0;

fail:
	t_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&t_gh);

	return error;
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder t_gh;
	int error;

	gfs2_quota_sync(sdp);
	gfs2_statfs_sync(sdp);

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
				   &t_gh);
	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return error;

	gfs2_meta_syncfs(sdp);
	gfs2_log_shutdown(sdp);

	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (t_gh.gh_gl)
		gfs2_glock_dq_uninit(&t_gh);

	gfs2_quota_cleanup(sdp);

	return error;
}
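
/*
 * statfs bookkeeping: a cluster-wide "master" statfs file holds the
 * authoritative counts, while each node accumulates its own deltas in a
 * local statfs_change file.  gfs2_statfs_change() updates only the local
 * file; gfs2_statfs_sync() periodically folds the local deltas into the
 * master under an exclusive glock, and gfs2_statfs_i() reports master
 * plus local so the numbers stay approximately right in between.
 */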
static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *l_bh;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	mutex_lock(&sdp->sd_statfs_mutex);
	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
	mutex_unlock(&sdp->sd_statfs_mutex);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
}

int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	mutex_lock(&sdp->sd_statfs_mutex);
	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
	mutex_unlock(&sdp->sd_statfs_mutex);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the statfs structure to fill in
 */

int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_rg.rg_free;
	sc->sc_dinodes += rgd->rd_rg.rg_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 */

int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_holder ri_gh;
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		goto out;

	rgd_next = gfs2_rgrpd_get_first(sdp);
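
	/*
	 * Poll a fixed window of 64 asynchronous glock requests: each pass
	 * harvests the resource groups whose locks have been granted and
	 * queues requests for the next ones, until no holders remain active.
	 */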
	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gh->gh_gl && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error)
						error = statfs_slow_fill(
							gh->gh_gl->gl_object, sc);
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gh->gh_gl)
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				if (!error)
					done = 0;
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	gfs2_glock_dq_uninit(&ri_gh);

out:
	kfree(gha);
	return error;
}
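
/*
 * One entry per journal, used by gfs2_lock_fs_check_clean() to keep the
 * shared glock it holds on each journal inode while it verifies that
 * every journal is clean.
 */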
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
				    struct gfs2_holder *t_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_holder ji_gh;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	error = gfs2_jindex_hold(sdp, &ji_gh);
	if (error)
		return error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
				   LM_FLAG_PRIORITY | GL_NOCACHE,
				   t_gh);

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(t_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}

	gfs2_glock_dq_uninit(&ji_gh);
	return error;
}

/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and meta data for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 */

int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
	int error = 0;

	mutex_lock(&sdp->sd_freeze_lock);

	if (!sdp->sd_freeze_count++) {
		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
		if (error)
			sdp->sd_freeze_count--;
	}

	mutex_unlock(&sdp->sd_freeze_lock);

	return error;
}
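
/*
 * Freeze requests nest via sd_freeze_count: only the first freeze takes
 * the transaction glock (and verifies the journals are clean), and only
 * the matching final unfreeze below drops it again.
 */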

/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 */

void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_freeze_lock);

	if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

	mutex_unlock(&sdp->sd_freeze_lock);
}