/* fs/gfs2/incore.h */
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/lockref.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020

struct gfs2_log_operations;
struct gfs2_bufdata;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
struct gfs2_log_header_host {
	u64 lh_sequence;	/* Sequence number of this transaction */
	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
	u32 lh_tail;		/* Block number of log tail */
	u32 lh_blkno;
	u32 lh_hash;
};

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header_host *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};
#define GBF_FULL 1

struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	unsigned long bi_flags;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_len;
	u32 bi_blocks;
};
struct gfs2_rgrpd {
	struct rb_node rd_node;		/* Link with superblock */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	u64 rd_addr;			/* grp block disk address */
	u64 rd_data0;			/* first data location */
	u32 rd_length;			/* length of rgrp header in fs blocks */
	u32 rd_data;			/* num of data blocks in rgrp */
	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
	u32 rd_free;
	u32 rd_reserved;		/* number of blocks reserved */
	u32 rd_free_clone;
	u32 rd_dinodes;
	u64 rd_igeneration;
	struct gfs2_bitmap *rd_bits;
	struct gfs2_sbd *rd_sbd;
	struct gfs2_rgrp_lvb *rd_rgl;
	u32 rd_last_alloc;
	u32 rd_flags;
	u32 rd_extfail_pt;		/* extent failure point */
#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
#define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
	spinlock_t rd_rsspin;		/* protects reservation related vars */
	struct rb_root rd_rstree;	/* multi-block reservation tree */
};
struct gfs2_rbm {
	struct gfs2_rgrpd *rgd;
	u32 offset;		/* The offset is bitmap relative */
	int bii;		/* Bitmap index */
};
static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_bits + rbm->bii;
}

static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
		rbm->offset;
}
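/*
 * Illustrative worked example of the arithmetic above (example numbers,
 * not values taken from this header).  GFS2_NBBY is the number of blocks
 * described per bitmap byte (4, since each block uses two bits).  With
 * rd_data0 = 1000, bi_start = 8 and offset = 5, gfs2_rbm_to_block() yields
 * 1000 + 8 * 4 + 5 = 1037, i.e. the rbm names filesystem block 1037.
 */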
static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
			       const struct gfs2_rbm *rbm2)
{
	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
	       (rbm1->offset == rbm2->offset);
}
enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
	BH_Zeronew = BH_PrivateStart + 2,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)
BUFFER_FNS(Zeronew, zeronew)
TAS_BUFFER_FNS(Zeronew, zeronew)
struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	u64 bd_blkno;

	struct list_head bd_list;
	const struct gfs2_log_operations *bd_ops;

	struct gfs2_trans *bd_tr;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};
/*
 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
 * prefix of lock_dlm_ gets awkward.
 */

#define GDLM_STRNAME_BYTES	25
#define GDLM_LVB_SIZE		32
/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery.  Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete.  To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. set while recover_block == recover_start.
 */
enum {
	DFL_BLOCK_LOCKS		= 0,
	DFL_NO_DLM_OPS		= 1,
	DFL_FIRST_MOUNT		= 2,
	DFL_FIRST_MOUNT_DONE	= 3,
	DFL_MOUNT_DONE		= 4,
	DFL_UNMOUNT		= 5,
	DFL_DLM_RECOVERY	= 6,
};
struct lm_lockname {
	u64 ln_number;
	unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
	(((name1)->ln_number == (name2)->ln_number) &&	\
	 ((name1)->ln_type == (name2)->ln_type))
struct gfs2_glock_operations {
	void (*go_sync) (struct gfs2_glock *gl);
	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_demote_ok) (const struct gfs2_glock *gl);
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_unlock) (struct gfs2_holder *gh);
	void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
	void (*go_callback)(struct gfs2_glock *gl, bool remote);
	const int go_type;
	const unsigned long go_flags;
#define GLOF_ASPACE 1
#define GLOF_LVB    2
};
enum {
	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
	GFS2_NR_LKSTATS
};

struct gfs2_lkstats {
	s64 stats[GFS2_NR_LKSTATS];
};
enum {
	/* States */
	HIF_HOLDER		= 6,  /* Set for gh that "holds" the glock */
	HIF_FIRST		= 7,
	HIF_WAIT		= 10,
};

struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct pid *gh_owner_pid;
	unsigned int gh_state;
	unsigned gh_flags;

	int gh_error;
	unsigned long gh_iflags; /* HIF_... */
	unsigned long gh_ip;
};
/* Number of quota types we support */
#define GFS2_MAXQUOTAS 2

/* Resource group multi-block reservation, in order of appearance:

   Step 1. Function prepares to write, allocates a mb, sets the size hint.
   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
   Step 4. Bits are assigned from the rgrp based on either the reservation
	   or wherever it can.
*/
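/*
 * Illustrative sketch (example code, not copied from the gfs2 sources):
 * once the steps above have established a reservation, its remaining extent
 * can be described with the rbm helpers declared earlier.  rs_rbm marks
 * where the reservation starts in the rgrp bitmaps and rs_free counts how
 * many reserved blocks are still unused.
 *
 *	struct gfs2_blkreserv *rs = ip->i_res;
 *	u64 first = gfs2_rbm_to_block(&rs->rs_rbm);	// start of reservation
 *	u64 last  = first + rs->rs_free - 1;		// last reserved block
 */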
struct gfs2_blkreserv {
	/* components used during write (step 1): */
	atomic_t rs_sizehint;         /* hint of the write size */

	struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
	struct rb_node rs_node;       /* link to other block reservations */
	struct gfs2_rbm rs_rbm;       /* Start of reservation */
	u32 rs_free;                  /* how many blocks are still free */
	u64 rs_inum;                  /* Inode number for reservation */

	/* ancillary quota stuff */
	struct gfs2_quota_data *rs_qa_qd[2 * GFS2_MAXQUOTAS];
	struct gfs2_holder rs_qa_qd_ghs[2 * GFS2_MAXQUOTAS];
	unsigned int rs_qa_qd_num;
};
/*
 * Allocation parameters
 * @target: The number of blocks we'd ideally like to allocate
 * @aflags: The flags (e.g. Orlov flag)
 *
 * The intent is to gradually expand this structure over time in
 * order to give more information, e.g. alignment, min extent size
 * to the allocation code.
 */
struct gfs2_alloc_parms {
	u32 target;
	u32 aflags;
};
enum {
	GLF_LOCK			= 1,
	GLF_DEMOTE			= 3,
	GLF_PENDING_DEMOTE		= 4,
	GLF_DEMOTE_IN_PROGRESS		= 5,
	GLF_DIRTY			= 6,
	GLF_LFLUSH			= 7,
	GLF_INVALIDATE_IN_PROGRESS	= 8,
	GLF_REPLY_PENDING		= 9,
	GLF_INITIAL			= 10,
	GLF_FROZEN			= 11,
	GLF_QUEUED			= 12,
	GLF_LRU				= 13,
	GLF_OBJECT			= 14, /* Used only for tracing */
	GLF_BLOCKING			= 15,
};
struct gfs2_glock {
	struct hlist_bl_node gl_list;
	struct gfs2_sbd *gl_sbd;
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;

	struct lockref gl_lockref;
#define gl_spin gl_lockref.lock

	/* State fields protected by gl_spin */
	unsigned int gl_state:2,	/* Current state */
		     gl_target:2,	/* Target state */
		     gl_demote_state:2,	/* State requested by remote node */
		     gl_req:2,		/* State in last dlm request */
		     gl_reply:8;	/* Last reply from the dlm */

	unsigned int gl_hash;
	unsigned long gl_demote_time; /* time of first demote request */
	long gl_hold_time;
	struct list_head gl_holders;

	const struct gfs2_glock_operations *gl_ops;
	ktime_t gl_dstamp;
	struct gfs2_lkstats gl_stats;
	struct dlm_lksb gl_lksb;
	unsigned long gl_tchange;
	void *gl_object;

	struct list_head gl_lru;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	atomic_t gl_revokes;
	struct delayed_work gl_work;
	union {
		/* For inode and iopen glocks only */
		struct work_struct gl_delete;
		/* For rgrp glocks only */
		struct {
			loff_t start;
			loff_t end;
		} gl_vm;
	};
	struct rcu_head gl_rcu;
};
#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */

enum {
	GIF_INVALID		= 0,
	GIF_QD_LOCKED		= 1,
	GIF_ALLOC_FAILED	= 2,
	GIF_SW_PAGED		= 3,
	GIF_ORDERED		= 4,
	GIF_FREE_VFS_INODE	= 5,
};
struct gfs2_inode {
	struct inode i_inode;
	u64 i_no_addr;
	u64 i_no_formal_ino;
	u64 i_generation;
	u64 i_eattr;
	unsigned long i_flags;		/* GIF_... */
	struct gfs2_glock *i_gl; /* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh; /* for prepare/commit_write only */
	struct gfs2_blkreserv *i_res; /* rgrp multi-block reservation */
	struct gfs2_rgrpd *i_rgd;
	u64 i_goal;	/* goal block for allocations */
	struct rw_semaphore i_rw_mutex;
	struct list_head i_ordered;
	struct list_head i_trunc_list;
	__be64 *i_hash_cache;
	u32 i_entries;
	u32 i_diskflags;
	u8 i_height;
	u8 i_depth;
};

/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}
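/*
 * Illustrative usage (variable names made up for the example): because
 * i_inode is embedded at offset zero, conversion in either direction is
 * just a pointer adjustment, and the superblock private data reached via
 * inode->i_sb->s_fs_info is the gfs2_sbd.
 *
 *	struct gfs2_inode *ip = GFS2_I(inode);	// VFS inode -> gfs2 inode
 *	struct inode *vfs_inode = &ip->i_inode;	// gfs2 inode -> VFS inode
 *	struct gfs2_sbd *sdp = GFS2_SB(inode);	// per-filesystem data
 *
 * GFS2_I(&ip->i_inode) == ip holds for any gfs2 inode.
 */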
struct gfs2_file {
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

enum {
	QDF_CHANGE		= 1,
	QDF_LOCKED		= 2,
	QDF_REFRESH		= 3,
};
struct gfs2_quota_data {
	struct hlist_bl_node qd_hlist;
	struct list_head qd_list;
	struct kqid qd_id;
	struct gfs2_sbd *qd_sbd;
	struct lockref qd_lockref;
	struct list_head qd_lru;
	unsigned qd_hash;

	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_count;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
	struct rcu_head qd_rcu;
};
struct gfs2_trans {
	unsigned long tr_ip;

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;
	unsigned int tr_touched:1;
	unsigned int tr_attached:1;
	unsigned int tr_alloced:1;

	unsigned int tr_num_buf_new;
	unsigned int tr_num_databuf_new;
	unsigned int tr_num_buf_rm;
	unsigned int tr_num_databuf_rm;
	unsigned int tr_num_revoke;
	unsigned int tr_num_revoke_rm;

	struct list_head tr_list;
	struct list_head tr_databuf;
	struct list_head tr_buf;

	unsigned int tr_first;
	struct list_head tr_ail1_list;
	struct list_head tr_ail2_list;
};
struct gfs2_journal_extent {
	struct list_head list;

	unsigned int lblock; /* First logical block */
	u64 dblock; /* First disk block */
	u64 blocks;
};
struct gfs2_jdesc {
	struct list_head jd_list;
	struct list_head extent_list;
	unsigned int nr_extents;
	struct work_struct jd_work;
	struct inode *jd_inode;
	unsigned long jd_flags;
#define JDF_RECOVERY 1
	unsigned int jd_jid;
	unsigned int jd_blocks;
	int jd_recover_error;
	/* Replay stuff */

	unsigned int jd_found_blocks;
	unsigned int jd_found_revokes;
	unsigned int jd_replayed_blocks;

	struct list_head jd_revoke_list;
	unsigned int jd_replay_tail;
};
struct gfs2_statfs_change_host {
	s64 sc_total;
	s64 sc_free;
	s64 sc_dinodes;
};
#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

#define GFS2_ERRORS_DEFAULT	GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW	0
#define GFS2_ERRORS_CONTINUE	1 /* place holder for future feature */
#define GFS2_ERRORS_RO		2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC	3
struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	unsigned int ar_spectator:1;		/* Don't get a journal */
	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
	unsigned int ar_debug:1;		/* Oops on errors */
	unsigned int ar_posix_acl:1;		/* Enable posix acls */
	unsigned int ar_quota:2;		/* off/account/on */
	unsigned int ar_suiddir:1;		/* suiddir support */
	unsigned int ar_data:2;			/* ordered/writeback */
	unsigned int ar_meta:1;			/* mount metafs */
	unsigned int ar_discard:1;		/* discard requests */
	unsigned int ar_errors:2;		/* errors=withdraw | panic */
	unsigned int ar_nobarrier:1;		/* do not send barriers */
	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
	int ar_commit;				/* Commit interval */
	int ar_statfs_quantum;			/* The fast statfs interval */
	int ar_quota_quantum;			/* The quota interval */
	int ar_statfs_percent;			/* The % change to force sync */
};
struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_logd_secs;

	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num; /* Numerator */
	unsigned int gt_quota_scale_den; /* Denominator */
	unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
	unsigned int gt_new_files_jdata;
	unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
	unsigned int gt_complain_secs;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};
enum {
	SDF_JOURNAL_CHECKED	= 0,
	SDF_JOURNAL_LIVE	= 1,
	SDF_SHUTDOWN		= 2,
	SDF_NOBARRIERS		= 3,
	SDF_NORECOVERY		= 4,
	SDF_DEMOTE		= 5,
	SDF_NOJOURNALID		= 6,
	SDF_RORECOVERY		= 7, /* read only recovery */
	SDF_SKIP_DLM_UNLOCK	= 8,
};

enum gfs2_freeze_state {
	SFS_UNFROZEN		= 0,
	SFS_STARTING_FREEZE	= 1,
	SFS_FROZEN		= 2,
};
#define GFS2_FSNAME_LEN		256

struct gfs2_inum_host {
	u64 no_formal_ino;
	u64 no_addr;
};

struct gfs2_sb_host {
	u32 sb_magic;
	u32 sb_type;
	u32 sb_format;

	u32 sb_fs_format;
	u32 sb_multihost_format;
	u32 sb_bsize;
	u32 sb_bsize_shift;

	struct gfs2_inum_host sb_master_dir;
	struct gfs2_inum_host sb_root_dir;

	char sb_lockproto[GFS2_LOCKNAME_LEN];
	char sb_locktable[GFS2_LOCKNAME_LEN];
};
/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */
struct lm_lockstruct {
	int ls_jid;
	unsigned int ls_first;
	const struct lm_lockops *ls_ops;
	dlm_lockspace_t *ls_dlm;

	int ls_recover_jid_done;   /* These two are deprecated, */
	int ls_recover_jid_status; /* used previously by gfs_controld */

	struct dlm_lksb ls_mounted_lksb; /* mounted_lock */
	struct dlm_lksb ls_control_lksb; /* control_lock */
	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
	char *ls_lvb_bits;

	spinlock_t ls_recover_spin; /* protects following fields */
	unsigned long ls_recover_flags; /* DFL_ */
	uint32_t ls_recover_mount; /* gen in first recover_done cb */
	uint32_t ls_recover_start; /* gen in last recover_done cb */
	uint32_t ls_recover_block; /* copy recover_start in last recover_prep */
	uint32_t ls_recover_size; /* size of recover_submit, recover_result */
	uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */
	uint32_t *ls_recover_result; /* result of last jid recovery */
};
struct gfs2_pcpu_lkstats {
	/* One struct for each glock type */
	struct gfs2_lkstats lkstats[10];
};
struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
	struct kobject sd_kobj;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb_host sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;	/* Number of pointers in a dinode */
	u32 sd_inptrs;	/* Number of pointers in a indirect block */
	u32 sd_jbsize;	/* Size of a journaled data block */
	u32 sd_hash_bsize;	/* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_blocks_per_bitmap;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_jheight; /* Max height of journaled file's meta tree */
	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_freeze_gl;
	struct work_struct sd_freeze_work;
	wait_queue_head_t sd_glock_wait;
	atomic_t sd_glock_disposal;
	struct completion sd_locking_init;
	struct completion sd_wdack;
	struct delayed_work sd_control_work;

	/* Inode Stuff */

	struct dentry *sd_master_dir;
	struct dentry *sd_root_dir;

	struct inode *sd_jindex;
	struct inode *sd_statfs_inode;
	struct inode *sd_sc_inode;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct gfs2_statfs_change_host sd_statfs_master;
	struct gfs2_statfs_change_host sd_statfs_local;
	int sd_statfs_force_sync;

	/* Resource group stuff */

	int sd_rindex_uptodate;
	spinlock_t sd_rindex_spin;
	struct rb_root sd_rindex_tree;
	unsigned int sd_rgrps;
	unsigned int sd_max_rg_data;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;

	struct gfs2_jdesc *sd_jdesc;
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;

	struct gfs2_holder sd_sc_gh;
	struct gfs2_holder sd_qc_gh;

	struct completion sd_journal_ready;

	/* Daemon stuff */

	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	struct mutex sd_quota_mutex;
	struct mutex sd_quota_sync_mutex;
	wait_queue_head_t sd_quota_wait;
	struct list_head sd_trunc_list;
	spinlock_t sd_trunc_lock;

	unsigned int sd_quota_slots;
	unsigned long *sd_quota_bitmap;
	spinlock_t sd_bitmap_lock;

	u64 sd_quota_sync_gen;

	/* Log stuff */

	struct address_space sd_aspace;

	spinlock_t sd_log_lock;

	struct gfs2_trans *sd_log_tr;
	unsigned int sd_log_blks_reserved;
	int sd_log_commited_revoke;

	atomic_t sd_log_pinned;
	unsigned int sd_log_num_revoke;

	struct list_head sd_log_le_revoke;
	struct list_head sd_log_le_ordered;
	spinlock_t sd_ordered_lock;

	atomic_t sd_log_thresh1;
	atomic_t sd_log_thresh2;
	atomic_t sd_log_blks_free;
	wait_queue_head_t sd_log_waitq;
	wait_queue_head_t sd_logd_waitq;

	u64 sd_log_sequence;
	unsigned int sd_log_head;
	unsigned int sd_log_tail;
	int sd_log_idle;

	struct rw_semaphore sd_log_flush_lock;
	atomic_t sd_log_in_flight;
	struct bio *sd_log_bio;
	wait_queue_head_t sd_log_flush_wait;
	int sd_log_error;

	atomic_t sd_reserving_log;
	wait_queue_head_t sd_reserving_log_wait;

	unsigned int sd_log_flush_head;
	u64 sd_log_flush_wrapped;

	spinlock_t sd_ail_lock;
	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;

	/* For quiescing the filesystem */
	struct gfs2_holder sd_freeze_gh;
	atomic_t sd_freeze_state;
	struct mutex sd_freeze_mutex;

	char sd_fsname[GFS2_FSNAME_LEN];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct dentry *debugfs_dir;    /* debugfs directory */
	struct dentry *debugfs_dentry_glocks;
	struct dentry *debugfs_dentry_glstats;
	struct dentry *debugfs_dentry_sbstats;
};
static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
	gl->gl_stats.stats[which]++;
}
static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
	const struct gfs2_sbd *sdp = gl->gl_sbd;
	preempt_disable();
	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
	preempt_enable();
}
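/*
 * Illustrative usage, not a call site copied from the gfs2 sources: a caller
 * accounting for a dlm request would typically bump both the per-glock and
 * the per-cpu, per-superblock counter with the same index, e.g.
 *
 *	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
 *	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 *
 * gfs2_sbstats_inc() disables preemption so that this_cpu_ptr() and the
 * increment both operate on the same CPU's copy of sd_lkstats.
 */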
#endif /* __INCORE_DOT_H__ */