/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/quotaops.h>

#include "trace_gfs2.h"

/**
 * gfs2_tune_init - Fill a gfs2_tune structure with default values
 */

static void gfs2_tune_init(struct gfs2_tune *gt)
        spin_lock_init(&gt->gt_spin);

        gt->gt_quota_simul_sync = 64;
        gt->gt_quota_warn_period = 10;
        gt->gt_quota_scale_num = 1;
        gt->gt_quota_scale_den = 1;
        gt->gt_new_files_jdata = 0;
        gt->gt_max_readahead = 1 << 18;
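        /* The default readahead set above is 1 << 18 bytes, i.e. 256 KiB. */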
        gt->gt_complain_secs = 10;

static struct gfs2_sbd *init_sbd(struct super_block *sb)

        sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);

        set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
        gfs2_tune_init(&sdp->sd_tune);

        init_waitqueue_head(&sdp->sd_glock_wait);
        atomic_set(&sdp->sd_glock_disposal, 0);
        init_completion(&sdp->sd_locking_init);
        spin_lock_init(&sdp->sd_statfs_spin);

        spin_lock_init(&sdp->sd_rindex_spin);
        mutex_init(&sdp->sd_rindex_mutex);
        sdp->sd_rindex_tree.rb_node = NULL;

        INIT_LIST_HEAD(&sdp->sd_jindex_list);
        spin_lock_init(&sdp->sd_jindex_spin);
        mutex_init(&sdp->sd_jindex_mutex);

        INIT_LIST_HEAD(&sdp->sd_quota_list);
        mutex_init(&sdp->sd_quota_mutex);
        init_waitqueue_head(&sdp->sd_quota_wait);
        INIT_LIST_HEAD(&sdp->sd_trunc_list);
        spin_lock_init(&sdp->sd_trunc_lock);

        spin_lock_init(&sdp->sd_log_lock);
        atomic_set(&sdp->sd_log_pinned, 0);
        INIT_LIST_HEAD(&sdp->sd_log_le_buf);
        INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
        INIT_LIST_HEAD(&sdp->sd_log_le_rg);
        INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
        INIT_LIST_HEAD(&sdp->sd_log_le_ordered);

        init_waitqueue_head(&sdp->sd_log_waitq);
        init_waitqueue_head(&sdp->sd_logd_waitq);
        spin_lock_init(&sdp->sd_ail_lock);
        INIT_LIST_HEAD(&sdp->sd_ail1_list);
        INIT_LIST_HEAD(&sdp->sd_ail2_list);

        init_rwsem(&sdp->sd_log_flush_lock);
        atomic_set(&sdp->sd_log_in_flight, 0);
        init_waitqueue_head(&sdp->sd_log_flush_wait);

        INIT_LIST_HEAD(&sdp->sd_revoke_list);

        mutex_init(&sdp->sd_freeze_lock);

/**
 * gfs2_check_sb - Check superblock
 * @sdp: the filesystem
 * @silent: Don't print a message if the check fails
 *
 * Checks the version code of the FS is one that we understand how to
 * read and that the sizes of the various on-disk structures have not
 * changed.
 */

static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)

        struct gfs2_sb_host *sb = &sdp->sd_sb;

        if (sb->sb_magic != GFS2_MAGIC ||
            sb->sb_type != GFS2_METATYPE_SB) {
                printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n");

        /* If format numbers match exactly, we're done. */

        if (sb->sb_fs_format == GFS2_FORMAT_FS &&
            sb->sb_multihost_format == GFS2_FORMAT_MULTI)

        fs_warn(sdp, "Unknown on-disk format, unable to mount\n");

static void end_bio_io_page(struct bio *bio, int error)

        struct page *page = bio->bi_private;

        SetPageUptodate(page);

        printk(KERN_WARNING "gfs2: error %d reading superblock\n", error);

static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)

        struct gfs2_sb_host *sb = &sdp->sd_sb;
        struct super_block *s = sdp->sd_vfs;
        const struct gfs2_sb *str = buf;

        sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
        sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
        sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
        sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
        sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
        sb->sb_bsize = be32_to_cpu(str->sb_bsize);
        sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
        sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
        sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
        sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
        sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);

        memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
        memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
        memcpy(s->s_uuid, str->sb_uuid, 16);

/**
 * gfs2_read_super - Read the gfs2 super block from disk
 * @sdp: The GFS2 super block
 * @sector: The location of the super block
 * @silent: Don't print a message if the read fails
 *
 * This uses the bio functions to read the super block from disk
 * because we want to be 100% sure that we never read cached data.
 * A super block is read twice only during each GFS2 mount and is
 * never written to by the filesystem. The first time it's read no
 * locks are held, and the only details which are looked at are those
 * relating to the locking protocol. Once locking is up and working,
 * the sb is read again under the lock to establish the location of
 * the master directory (contains pointers to journals etc) and the
 * root directory.
 *
 * Returns: 0 on success or error
 */

static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)

        struct super_block *sb = sdp->sd_vfs;

        page = alloc_page(GFP_NOFS);

        ClearPageUptodate(page);
        ClearPageDirty(page);

        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_sector = sector * (sb->s_blocksize >> 9);
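        /*
         * bi_sector above is in 512-byte units, so the filesystem-block
         * index is scaled by the number of 512-byte sectors per block.
         */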
        bio->bi_bdev = sb->s_bdev;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        bio->bi_end_io = end_bio_io_page;
        bio->bi_private = page;
        submit_bio(READ_SYNC | REQ_META, bio);
        wait_on_page_locked(page);

        if (!PageUptodate(page)) {

        return gfs2_check_sb(sdp, silent);

/**
 * gfs2_read_sb - Read super block
 * @sdp: The GFS2 superblock
 * @silent: Don't print message if mount fails
 */

static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)

        u32 hash_blocks, ind_blocks, leaf_blocks;

        error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);

                fs_err(sdp, "can't read superblock\n");

        sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
                               GFS2_BASIC_BLOCK_SHIFT;
        sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
        sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_dinode)) / sizeof(u64);
        sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / sizeof(u64);
        sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
        sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
        sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
        sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
        sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header)) /
                                sizeof(struct gfs2_quota_change);
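        /*
         * For illustration: with a 4096-byte block the formulas above give
         * (4096 - sizeof(struct gfs2_dinode)) / 8 direct pointers per dinode
         * and (4096 - sizeof(struct gfs2_meta_header)) / 8 pointers per
         * indirect block, i.e. a few hundred each; these counts drive the
         * height calculations below.
         */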

        /* Compute maximum reservation required to add an entry to a directory */

        hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),

        for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
                tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
                ind_blocks += tmp_blocks;

        leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

        sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

        sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_dinode);
        sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;

                space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
                m = do_div(d, sdp->sd_inptrs);

                if (d != sdp->sd_heightsize[x - 1] || m)
                sdp->sd_heightsize[x] = space;

        sdp->sd_max_height = x;
        sdp->sd_heightsize[x] = ~0;
        gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
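        /*
         * sd_heightsize[x] is the largest file size (in bytes) addressable
         * by a metadata tree of height x; the loop above stops at the first
         * height whose capacity no longer fits in a u64, and that height
         * becomes sd_max_height.
         */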

        sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
                                 sizeof(struct gfs2_dinode);
        sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;

                space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
                m = do_div(d, sdp->sd_inptrs);

                if (d != sdp->sd_jheightsize[x - 1] || m)
                sdp->sd_jheightsize[x] = space;

        sdp->sd_max_jheight = x;
        sdp->sd_jheightsize[x] = ~0;
        gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);

static int init_names(struct gfs2_sbd *sdp, int silent)

        proto = sdp->sd_args.ar_lockproto;
        table = sdp->sd_args.ar_locktable;

        /* Try to autodetect */

        if (!proto[0] || !table[0]) {
                error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);

                        proto = sdp->sd_sb.sb_lockproto;
                        table = sdp->sd_sb.sb_locktable;

                table = sdp->sd_vfs->s_id;

        strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
        strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);

        table = sdp->sd_table_name;
        while ((table = strchr(table, '/')))

static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,

        error = gfs2_glock_nq_num(sdp,
                                  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
                                  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,

                fs_err(sdp, "can't acquire mount glock: %d\n", error);

        error = gfs2_glock_nq_num(sdp,
                                  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
                                  LM_FLAG_NOEXP | GL_EXACT,

                fs_err(sdp, "can't acquire live glock: %d\n", error);

        error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
                               CREATE, &sdp->sd_rename_gl);

                fs_err(sdp, "can't create rename glock: %d\n", error);

        error = gfs2_glock_get(sdp, GFS2_TRANS_LOCK, &gfs2_trans_glops,
                               CREATE, &sdp->sd_trans_gl);

                fs_err(sdp, "can't create transaction glock: %d\n", error);

        gfs2_glock_put(sdp->sd_trans_gl);
        gfs2_glock_put(sdp->sd_rename_gl);
        gfs2_glock_dq_uninit(&sdp->sd_live_gh);
        gfs2_glock_dq_uninit(mount_gh);

static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
                            u64 no_addr, const char *name)

        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct dentry *dentry;

        inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);

                fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
                return PTR_ERR(inode);

        dentry = d_alloc_root(inode);

                fs_err(sdp, "can't alloc %s dentry\n", name);

static int init_sb(struct gfs2_sbd *sdp, int silent)

        struct super_block *sb = sdp->sd_vfs;
        struct gfs2_holder sb_gh;

        ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
                                LM_ST_SHARED, 0, &sb_gh);

                fs_err(sdp, "can't acquire superblock glock: %d\n", ret);

        ret = gfs2_read_sb(sdp, silent);

                fs_err(sdp, "can't read superblock: %d\n", ret);

        /* Set up the buffer cache and SB for real */
        if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
                fs_err(sdp, "FS block size (%u) is too small for device "
                       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));

        if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
                fs_err(sdp, "FS block size (%u) is too big for machine "
                       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);

        sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);

        /* Get the root inode */
        no_addr = sdp->sd_sb.sb_root_dir.no_addr;
        ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");

        /* Get the master inode */
        no_addr = sdp->sd_sb.sb_master_dir.no_addr;
        ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");

                dput(sdp->sd_root_dir);

        sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);

        gfs2_glock_dq_uninit(&sb_gh);

/**
 * map_journal_extents - create a reusable "extent" mapping from all logical
 * blocks to all physical blocks for the given journal.  This will save
 * us time when writing journal blocks.  Most journals will have only one
 * extent that maps all their logical blocks.  That's because mkfs.gfs2
 * arranges the journal blocks sequentially to maximize performance.
 * So the extent would map the first block for the entire file length.
 * However, gfs2_jadd can happen while file activity is happening, so
 * those journals may not be sequential.  Less likely is the case where
 * the users created their own journals by mounting the metafs and
 * laying it out.  But it's still possible.  These journals might have
 * several extents.
 *
 * TODO: This should be done in bigger chunks rather than one block at a time,
 *       but since it's only done at mount time, I'm not worried about the
 *       time it takes.
 */

static int map_journal_extents(struct gfs2_sbd *sdp)

        struct gfs2_jdesc *jd = sdp->sd_jdesc;
        u64 db, prev_db;        /* logical block, disk block, prev disk block */
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_journal_extent *jext = NULL;
        struct buffer_head bh;

        for (lb = 0; lb < i_size_read(jd->jd_inode) >> sdp->sd_sb.sb_bsize_shift; lb++) {
                bh.b_size = 1 << ip->i_inode.i_blkbits;
                rc = gfs2_block_map(jd->jd_inode, lb, &bh, 0);

                        printk(KERN_INFO "GFS2 journal mapping error %d: lb="
                               "%u db=%llu\n", rc, lb, (unsigned long long)db);

                if (!prev_db || db != prev_db + 1) {
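                        /*
                         * Physical discontinuity (or the very first block):
                         * start a new extent record; contiguous blocks just
                         * extend the current one.
                         */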
                        jext = kzalloc(sizeof(struct gfs2_journal_extent),

                                printk(KERN_INFO "GFS2 error: out of memory "
                                       "mapping journal extents.\n");

                        list_add_tail(&jext->extent_list, &jd->extent_list);

static void gfs2_others_may_mount(struct gfs2_sbd *sdp)

        char *message = "FIRSTMOUNT=Done";
        char *envp[] = { message, NULL };

        fs_info(sdp, "first mount done, others may mount\n");

        if (sdp->sd_lockstruct.ls_ops->lm_first_done)
                sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);

        kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);

/**
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 */

static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)

        struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
        struct gfs2_jdesc *jd;

        mutex_lock(&sdp->sd_jindex_mutex);

                error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);

                name.len = sprintf(buf, "journal%u", sdp->sd_journals);
                name.hash = gfs2_disk_hash(name.name, name.len);

                error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
                if (error == -ENOENT) {

                gfs2_glock_dq_uninit(ji_gh);

                jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);

                INIT_LIST_HEAD(&jd->extent_list);
                INIT_WORK(&jd->jd_work, gfs2_recover_func);
                jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
                if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
                        error = PTR_ERR(jd->jd_inode);

                spin_lock(&sdp->sd_jindex_spin);
                jd->jd_jid = sdp->sd_journals++;
                list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
                spin_unlock(&sdp->sd_jindex_spin);

        mutex_unlock(&sdp->sd_jindex_mutex);

static int init_journal(struct gfs2_sbd *sdp, int undo)

        struct inode *master = sdp->sd_master_dir->d_inode;
        struct gfs2_holder ji_gh;
        struct gfs2_inode *ip;

        sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
        if (IS_ERR(sdp->sd_jindex)) {
                fs_err(sdp, "can't lookup journal index: %d\n", error);
                return PTR_ERR(sdp->sd_jindex);

        /* Load in the journal index special file */

        error = gfs2_jindex_hold(sdp, &ji_gh);

                fs_err(sdp, "can't read journal index: %d\n", error);

        if (!gfs2_jindex_size(sdp)) {
                fs_err(sdp, "no journals!\n");

        if (sdp->sd_args.ar_spectator) {
                sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
                atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
                atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
                atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);

                if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
                        fs_err(sdp, "can't mount journal #%u\n",
                               sdp->sd_lockstruct.ls_jid);
                        fs_err(sdp, "there are only %u journals (0 - %u)\n",
                               gfs2_jindex_size(sdp),
                               gfs2_jindex_size(sdp) - 1);

                sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);

                error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
                                          LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
                                          &sdp->sd_journal_gh);

                        fs_err(sdp, "can't acquire journal glock: %d\n", error);

                ip = GFS2_I(sdp->sd_jdesc->jd_inode);
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
                                           LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,

                        fs_err(sdp, "can't acquire journal inode glock: %d\n",
                        goto fail_journal_gh;

                error = gfs2_jdesc_check(sdp->sd_jdesc);

                        fs_err(sdp, "my journal (%u) is bad: %d\n",
                               sdp->sd_jdesc->jd_jid, error);

                atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
                atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
                atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
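
                /*
                 * Roughly speaking, the two thresholds (2/5 and 4/5 of the
                 * journal size) are the low-water marks the gfs2_logd
                 * daemon compares against when deciding to flush the log.
                 */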

        /* Map the extents for this journal's blocks */
        map_journal_extents(sdp);

        trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));

        if (sdp->sd_lockstruct.ls_first) {
                for (x = 0; x < sdp->sd_journals; x++) {
                        error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x),

                                fs_err(sdp, "error recovering journal %u: %d\n",

                gfs2_others_may_mount(sdp);
        } else if (!sdp->sd_args.ar_spectator) {
                error = gfs2_recover_journal(sdp->sd_jdesc, true);

                        fs_err(sdp, "error recovering my journal: %d\n", error);

        set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
        gfs2_glock_dq_uninit(&ji_gh);

        if (!sdp->sd_args.ar_spectator)
                gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);

        if (!sdp->sd_args.ar_spectator)
                gfs2_glock_dq_uninit(&sdp->sd_journal_gh);

        gfs2_jindex_free(sdp);

        gfs2_glock_dq_uninit(&ji_gh);

        iput(sdp->sd_jindex);

static int init_inodes(struct gfs2_sbd *sdp, int undo)

        struct inode *master = sdp->sd_master_dir->d_inode;

        error = init_journal(sdp, undo);

        /* Read in the master statfs inode */
        sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
        if (IS_ERR(sdp->sd_statfs_inode)) {
                error = PTR_ERR(sdp->sd_statfs_inode);
                fs_err(sdp, "can't read in statfs inode: %d\n", error);

        /* Read in the resource index inode */
        sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
        if (IS_ERR(sdp->sd_rindex)) {
                error = PTR_ERR(sdp->sd_rindex);
                fs_err(sdp, "can't get resource index inode: %d\n", error);

        sdp->sd_rindex_uptodate = 0;

        /* Read in the quota inode */
        sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
        if (IS_ERR(sdp->sd_quota_inode)) {
                error = PTR_ERR(sdp->sd_quota_inode);
                fs_err(sdp, "can't get quota file inode: %d\n", error);

        error = gfs2_rindex_update(sdp);

        iput(sdp->sd_quota_inode);

        gfs2_clear_rgrpd(sdp);
        iput(sdp->sd_rindex);

        iput(sdp->sd_statfs_inode);

        init_journal(sdp, UNDO);

static int init_per_node(struct gfs2_sbd *sdp, int undo)

        struct inode *pn = NULL;
        struct gfs2_inode *ip;
        struct inode *master = sdp->sd_master_dir->d_inode;

        if (sdp->sd_args.ar_spectator)

        pn = gfs2_lookup_simple(master, "per_node");

                fs_err(sdp, "can't find per_node directory: %d\n", error);

        sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
        sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
        if (IS_ERR(sdp->sd_sc_inode)) {
                error = PTR_ERR(sdp->sd_sc_inode);
                fs_err(sdp, "can't find local \"sc\" file: %d\n", error);

        sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
        sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
        if (IS_ERR(sdp->sd_qc_inode)) {
                error = PTR_ERR(sdp->sd_qc_inode);
                fs_err(sdp, "can't find local \"qc\" file: %d\n", error);

        ip = GFS2_I(sdp->sd_sc_inode);
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,

                fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);

        ip = GFS2_I(sdp->sd_qc_inode);
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,

                fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);

        gfs2_glock_dq_uninit(&sdp->sd_qc_gh);

        gfs2_glock_dq_uninit(&sdp->sd_sc_gh);

        iput(sdp->sd_qc_inode);

        iput(sdp->sd_sc_inode);

static int init_threads(struct gfs2_sbd *sdp, int undo)

        struct task_struct *p;

        p = kthread_run(gfs2_logd, sdp, "gfs2_logd");

                fs_err(sdp, "can't start logd thread: %d\n", error);

        sdp->sd_logd_process = p;

        p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");

                fs_err(sdp, "can't start quotad thread: %d\n", error);

        sdp->sd_quotad_process = p;

        kthread_stop(sdp->sd_quotad_process);

        kthread_stop(sdp->sd_logd_process);

static const match_table_t nolock_tokens = {
        { Opt_jid, "jid=%d\n", },

static const struct lm_lockops nolock_ops = {
        .lm_proto_name = "lock_nolock",
        .lm_put_lock = gfs2_glock_free,
        .lm_tokens = &nolock_tokens,

/**
 * gfs2_lm_mount - mount a locking protocol
 * @sdp: the filesystem
 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 */

static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)

        const struct lm_lockops *lm;
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        struct gfs2_args *args = &sdp->sd_args;
        const char *proto = sdp->sd_proto_name;
        const char *table = sdp->sd_table_name;

        if (!strcmp("lock_nolock", proto)) {
                sdp->sd_args.ar_localflocks = 1;
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
        } else if (!strcmp("lock_dlm", proto)) {

                printk(KERN_INFO "GFS2: can't find protocol %s\n", proto);

        fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
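
        /*
         * ar_hostdata is a colon-separated option string supplied by the
         * cluster tooling, e.g. (illustrative) "jid=0:first=1:nodir=0";
         * each token is matched against lm->lm_tokens below.
         */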
        for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
                substring_t tmp[MAX_OPT_ARGS];

                token = match_token(o, *lm->lm_tokens, tmp);

                        ret = match_int(&tmp[0], &option);
                        if (ret || option < 0)
                        if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))

                        /* Obsolete, but left for backward compat purposes */

                        ret = match_int(&tmp[0], &option);
                        if (ret || (option != 0 && option != 1))
                        ls->ls_first = option;

                        ret = match_int(&tmp[0], &option);
                        if (ret || (option != 0 && option != 1))
                                goto hostdata_error;
                        ls->ls_nodir = option;

                        fs_info(sdp, "unknown hostdata (%s)\n", o);

        if (lm->lm_mount == NULL) {
                fs_info(sdp, "Now mounting FS...\n");
                complete_all(&sdp->sd_locking_init);

        ret = lm->lm_mount(sdp, table);

                fs_info(sdp, "Joined cluster. Now mounting FS...\n");
        complete_all(&sdp->sd_locking_init);

void gfs2_lm_unmount(struct gfs2_sbd *sdp)

        const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
            lm->lm_unmount)
                lm->lm_unmount(sdp);

static int gfs2_journalid_wait(void *word)

        if (signal_pending(current))

static int wait_on_journal(struct gfs2_sbd *sdp)

        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)

        return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE);

void gfs2_online_uevent(struct gfs2_sbd *sdp)

        struct super_block *sb = sdp->sd_vfs;
        char *envp[] = { ro, spectator, NULL };
        sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
        sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
        kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);

/**
 * fill_super - Read in superblock
 * @sb: The VFS superblock
 * @args: The mount arguments
 * @silent: Don't complain if it's not a GFS2 filesystem
 */

static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)

        struct gfs2_sbd *sdp;
        struct gfs2_holder mount_gh;

                printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");

        sdp->sd_args = *args;

        if (sdp->sd_args.ar_spectator) {
                sb->s_flags |= MS_RDONLY;
                set_bit(SDF_RORECOVERY, &sdp->sd_flags);

        if (sdp->sd_args.ar_posix_acl)
                sb->s_flags |= MS_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);

        sb->s_flags |= MS_NOSEC;
        sb->s_magic = GFS2_MAGIC;
        sb->s_op = &gfs2_super_ops;
        sb->s_d_op = &gfs2_dops;
        sb->s_export_op = &gfs2_export_ops;
        sb->s_xattr = gfs2_xattr_handlers;
        sb->s_qcop = &gfs2_quotactl_ops;
        sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
        sb->s_time_gran = 1;
        sb->s_maxbytes = MAX_LFS_FILESIZE;

        /* Set up the buffer cache and fill in some fake block size values
           to allow us to read-in the on-disk superblock. */
        sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
        sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
        sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
                               GFS2_BASIC_BLOCK_SHIFT;
        sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
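        /*
         * sd_fsb2bb is the number of basic (512-byte) blocks per filesystem
         * block; here it is based on the provisional block size chosen above
         * and is recomputed in gfs2_read_sb() once the real on-disk block
         * size is known.
         */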

        sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
        sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
        if (sdp->sd_args.ar_statfs_quantum) {
                sdp->sd_tune.gt_statfs_slow = 0;
                sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;

                sdp->sd_tune.gt_statfs_slow = 1;
                sdp->sd_tune.gt_statfs_quantum = 30;

        error = init_names(sdp, silent);

        snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s", sdp->sd_table_name);

        gfs2_create_debugfs_file(sdp);

        error = gfs2_sys_fs_add(sdp);

        error = gfs2_lm_mount(sdp, silent);

        error = init_locking(sdp, &mount_gh, DO);

        error = init_sb(sdp, silent);

        error = wait_on_journal(sdp);

        /*
         * If user space has failed to join the cluster or some similar
         * failure has occurred, then the journal id will contain a
         * negative (error) number. This will then be returned to the
         * caller (of the mount syscall). We do this even for spectator
         * mounts (which just write a jid of 0 to indicate "ok" even though
         * the jid is unused in the spectator case)
         */
        if (sdp->sd_lockstruct.ls_jid < 0) {
                error = sdp->sd_lockstruct.ls_jid;
                sdp->sd_lockstruct.ls_jid = 0;

        if (sdp->sd_args.ar_spectator)
                snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s",
                         sdp->sd_table_name);

                snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u",
                         sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);

        error = init_inodes(sdp, DO);

        error = init_per_node(sdp, DO);

        error = gfs2_statfs_init(sdp);

                fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);

        error = init_threads(sdp, DO);

        if (!(sb->s_flags & MS_RDONLY)) {
                error = gfs2_make_fs_rw(sdp);

                        fs_err(sdp, "can't make FS RW: %d\n", error);

        gfs2_glock_dq_uninit(&mount_gh);
        gfs2_online_uevent(sdp);

        init_threads(sdp, UNDO);

        init_per_node(sdp, UNDO);

        init_inodes(sdp, UNDO);

        if (sdp->sd_root_dir)
                dput(sdp->sd_root_dir);
        if (sdp->sd_master_dir)
                dput(sdp->sd_master_dir);

        init_locking(sdp, &mount_gh, UNDO);

        gfs2_gl_hash_clear(sdp);
        gfs2_lm_unmount(sdp);

        gfs2_sys_fs_del(sdp);

        gfs2_delete_debugfs_file(sdp);

        sb->s_fs_info = NULL;

static int set_gfs2_super(struct super_block *s, void *data)

        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;

static int test_gfs2_super(struct super_block *s, void *ptr)

        struct block_device *bdev = ptr;
        return (bdev == s->s_bdev);

/**
 * gfs2_mount - Get the GFS2 superblock
 * @fs_type: The GFS2 filesystem type
 * @flags: Mount flags
 * @dev_name: The name of the device
 * @data: The mount arguments
 *
 * Q. Why not use get_sb_bdev() ?
 * A. We need to select one of two root directories to mount, independent
 *    of whether this is the initial, or subsequent, mount of this sb
 *
 * Returns: the root dentry on success, or an error pointer on failure
 */

static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
                                 const char *dev_name, void *data)

        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        struct gfs2_args args;
        struct gfs2_sbd *sdp;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);

                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);

        s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);

                blkdev_put(bdev, mode);

        memset(&args, 0, sizeof(args));
        args.ar_quota = GFS2_QUOTA_DEFAULT;
        args.ar_data = GFS2_DATA_DEFAULT;
        args.ar_commit = 30;
        args.ar_statfs_quantum = 30;
        args.ar_quota_quantum = 60;
        args.ar_errors = GFS2_ERRORS_DEFAULT;

        error = gfs2_mount_args(&args, data);

                printk(KERN_WARNING "GFS2: can't parse mount arguments\n");

        if ((flags ^ s->s_flags) & MS_RDONLY)

                char b[BDEVNAME_SIZE];

                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);

                s->s_flags |= MS_ACTIVE;

                return dget(sdp->sd_master_dir);

                return dget(sdp->sd_root_dir);

        deactivate_locked_super(s);
        return ERR_PTR(error);

        blkdev_put(bdev, mode);
        return ERR_PTR(error);

static int set_meta_super(struct super_block *s, void *ptr)

static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
                                      int flags, const char *dev_name, void *data)

        struct super_block *s;
        struct gfs2_sbd *sdp;

        error = kern_path(dev_name, LOOKUP_FOLLOW, &path);

                printk(KERN_WARNING "GFS2: path_lookup on %s returned error %d\n",
                return ERR_PTR(error);

        s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
                 path.dentry->d_inode->i_sb->s_bdev);

                printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");

        if ((flags ^ s->s_flags) & MS_RDONLY) {
                deactivate_locked_super(s);
                return ERR_PTR(-EBUSY);

        return dget(sdp->sd_master_dir);

static void gfs2_kill_sb(struct super_block *sb)

        struct gfs2_sbd *sdp = sb->s_fs_info;

                kill_block_super(sb);

        gfs2_meta_syncfs(sdp);
        dput(sdp->sd_root_dir);
        dput(sdp->sd_master_dir);
        sdp->sd_root_dir = NULL;
        sdp->sd_master_dir = NULL;
        shrink_dcache_sb(sb);
        kill_block_super(sb);
        gfs2_delete_debugfs_file(sdp);

struct file_system_type gfs2_fs_type = {
        .fs_flags = FS_REQUIRES_DEV,
        .mount = gfs2_mount,
        .kill_sb = gfs2_kill_sb,
        .owner = THIS_MODULE,
};

struct file_system_type gfs2meta_fs_type = {
        .fs_flags = FS_REQUIRES_DEV,
        .mount = gfs2_mount_meta,
        .owner = THIS_MODULE,
};