/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/lockref.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020

struct gfs2_log_operations;
struct gfs2_bufdata;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);

struct gfs2_log_header_host {
        u64 lh_sequence;	/* Sequence number of this transaction */
        u32 lh_flags;		/* GFS2_LOG_HEAD_... */
        u32 lh_tail;		/* Block number of log tail */
        u32 lh_blkno;
        u32 lh_hash;
};

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
        void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
        void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
        void (*lo_before_scan) (struct gfs2_jdesc *jd,
                                struct gfs2_log_header_host *head, int pass);
        int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
                                 struct gfs2_log_descriptor *ld, __be64 *ptr,
                                 int pass);
        void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
        const char *lo_name;
};

#define GBF_FULL 1

struct gfs2_bitmap {
        struct buffer_head *bi_bh;
        char *bi_clone;
        unsigned long bi_flags;
        u32 bi_offset;
        u32 bi_start;
        u32 bi_len;
        u32 bi_blocks;
};

struct gfs2_rgrpd {
        struct rb_node rd_node;		/* Link with superblock */
        struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
        u64 rd_addr;			/* grp block disk address */
        u64 rd_data0;			/* first data location */
        u32 rd_length;			/* length of rgrp header in fs blocks */
        u32 rd_data;			/* num of data blocks in rgrp */
        u32 rd_bitbytes;		/* number of bytes in data bitmaps */
        u32 rd_free;
        u32 rd_reserved;		/* number of blocks reserved */
        u32 rd_free_clone;
        u32 rd_dinodes;
        u64 rd_igeneration;
        struct gfs2_bitmap *rd_bits;
        struct gfs2_sbd *rd_sbd;
        struct gfs2_rgrp_lvb *rd_rgl;
        u32 rd_last_alloc;
        u32 rd_flags;
        u32 rd_extfail_pt;		/* extent failure point */
#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
        spinlock_t rd_rsspin;		/* protects reservation related vars */
        struct rb_root rd_rstree;	/* multi-block reservation tree */
};

struct gfs2_rbm {
        struct gfs2_rgrpd *rgd;
        u32 offset;		/* The offset is bitmap relative */
        int bii;		/* Bitmap index */
};

static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
{
        return rbm->rgd->rd_bits + rbm->bii;
}

static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
        return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
               rbm->offset;
}

static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
                               const struct gfs2_rbm *rbm2)
{
        return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
               (rbm1->offset == rbm2->offset);
}

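/*
 * Illustrative worked example of the conversion above (the values are made
 * up, not taken from this file): with rd_data0 = 1000, bi_start = 16 bitmap
 * bytes and offset = 3, and GFS2_NBBY == 4 blocks described per bitmap byte
 * (see gfs2_ondisk.h), gfs2_rbm_to_block() yields 1000 + 16 * 4 + 3 = 1067.
 */
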
enum gfs2_state_bits {
        BH_Pinned = BH_PrivateStart,
        BH_Escaped = BH_PrivateStart + 1,
        BH_Zeronew = BH_PrivateStart + 2,
};

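/*
 * BUFFER_FNS() and TAS_BUFFER_FNS() come from <linux/buffer_head.h>; for
 * each flag below they generate helpers such as set_buffer_pinned(),
 * clear_buffer_pinned(), buffer_pinned() and test_and_set_buffer_pinned().
 */
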
BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)
BUFFER_FNS(Zeronew, zeronew)
TAS_BUFFER_FNS(Zeronew, zeronew)

struct gfs2_bufdata {
        struct buffer_head *bd_bh;
        struct gfs2_glock *bd_gl;
        u64 bd_blkno;

        struct list_head bd_list;
        const struct gfs2_log_operations *bd_ops;

        struct gfs2_trans *bd_tr;
        struct list_head bd_ail_st_list;
        struct list_head bd_ail_gl_list;
};

/*
 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
 * prefix of lock_dlm_ gets awkward.
 */

#define GDLM_STRNAME_BYTES	25
#define GDLM_LVB_SIZE		32

/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery. Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete. To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time. The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. set while recover_block == recover_start.
 */

enum {
        DFL_BLOCK_LOCKS		= 0,
        DFL_NO_DLM_OPS		= 1,
        DFL_FIRST_MOUNT		= 2,
        DFL_FIRST_MOUNT_DONE	= 3,
        DFL_MOUNT_DONE		= 4,
        DFL_UNMOUNT		= 5,
        DFL_DLM_RECOVERY	= 6,
};

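/*
 * Minimal sketch (illustrative only, not code from this file) of how a
 * caller would sample the recovery state consistently with the locking
 * rule described above, i.e. under ls_recover_spin:
 *
 *	spin_lock(&ls->ls_recover_spin);
 *	block_locks = test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
 *	in_dlm_recovery = (ls->ls_recover_block == ls->ls_recover_start);
 *	spin_unlock(&ls->ls_recover_spin);
 */
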
struct lm_lockname {
        u64 ln_number;
        unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
        (((name1)->ln_number == (name2)->ln_number) &&	\
         ((name1)->ln_type == (name2)->ln_type))

struct gfs2_glock_operations {
        void (*go_sync) (struct gfs2_glock *gl);
        int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
        void (*go_inval) (struct gfs2_glock *gl, int flags);
        int (*go_demote_ok) (const struct gfs2_glock *gl);
        int (*go_lock) (struct gfs2_holder *gh);
        void (*go_unlock) (struct gfs2_holder *gh);
        void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
        void (*go_callback)(struct gfs2_glock *gl, bool remote);
        const int go_type;
        const unsigned long go_flags;
#define GLOF_ASPACE 1
#define GLOF_LVB    2
};

enum {
        GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
        GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
        GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
        GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
        GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
        GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
        GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
        GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
        GFS2_NR_LKSTATS
};

struct gfs2_lkstats {
        s64 stats[GFS2_NR_LKSTATS];
};

enum {
        /* States */
        HIF_HOLDER	= 6,  /* Set for gh that "holds" the glock */
        HIF_FIRST	= 7,
        HIF_WAIT	= 10,
};

struct gfs2_holder {
        struct list_head gh_list;

        struct gfs2_glock *gh_gl;
        struct pid *gh_owner_pid;
        unsigned int gh_state;
        unsigned gh_flags;

        int gh_error;
        unsigned long gh_iflags; /* HIF_... */
        unsigned long gh_ip;
};

/* Number of quota types we support */
#define GFS2_MAXQUOTAS 2

/* Resource group multi-block reservation, in order of appearance:

   Step 1. Function prepares to write, allocates a mb, sets the size hint.
   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
   Step 4. Bits are assigned from the rgrp based on either the reservation
           or wherever it can.
*/

struct gfs2_blkreserv {
        /* components used during write (step 1): */
        atomic_t rs_sizehint;         /* hint of the write size */

        struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
        struct rb_node rs_node;       /* link to other block reservations */
        struct gfs2_rbm rs_rbm;       /* Start of reservation */
        u32 rs_free;                  /* how many blocks are still free */
        u64 rs_inum;                  /* Inode number for reservation */

        /* ancillary quota stuff */
        struct gfs2_quota_data *rs_qa_qd[2 * GFS2_MAXQUOTAS];
        struct gfs2_holder rs_qa_qd_ghs[2 * GFS2_MAXQUOTAS];
        unsigned int rs_qa_qd_num;
};

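/*
 * A reservation covers a run of blocks inside one rgrp.  As a sketch
 * (illustrative only, not code from this file), the window implied by the
 * fields above is:
 *
 *	u64 first_blk = gfs2_rbm_to_block(&rs->rs_rbm);
 *	u64 last_blk  = first_blk + rs->rs_free - 1;
 */
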
/*
 * Allocation parameters
 * @target: The number of blocks we'd ideally like to allocate
 * @aflags: The flags (e.g. Orlov flag)
 *
 * The intent is to gradually expand this structure over time in
 * order to give more information, e.g. alignment, min extent size
 * to the allocation code.
 */
struct gfs2_alloc_parms {
        u32 target;
        u32 aflags;
};

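/*
 * Illustrative caller-side use (assumed, not from this file): a caller that
 * wants "blocks_wanted" blocks would pass something like
 *
 *	struct gfs2_alloc_parms ap = { .target = blocks_wanted, .aflags = 0 };
 *
 * to the allocation code.
 */
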
enum {
        GLF_LOCK			= 1,
        GLF_DEMOTE			= 3,
        GLF_PENDING_DEMOTE		= 4,
        GLF_DEMOTE_IN_PROGRESS		= 5,
        GLF_DIRTY			= 6,
        GLF_LFLUSH			= 7,
        GLF_INVALIDATE_IN_PROGRESS	= 8,
        GLF_REPLY_PENDING		= 9,
        GLF_INITIAL			= 10,
        GLF_FROZEN			= 11,
        GLF_QUEUED			= 12,
        GLF_LRU				= 13,
        GLF_OBJECT			= 14, /* Used only for tracing */
        GLF_BLOCKING			= 15,
};

struct gfs2_glock {
        struct hlist_bl_node gl_list;
        struct gfs2_sbd *gl_sbd;
        unsigned long gl_flags;		/* GLF_... */
        struct lm_lockname gl_name;

        struct lockref gl_lockref;
#define gl_spin gl_lockref.lock

        /* State fields protected by gl_spin */
        unsigned int gl_state:2,	/* Current state */
                     gl_target:2,	/* Target state */
                     gl_demote_state:2,	/* State requested by remote node */
                     gl_req:2,		/* State in last dlm request */
                     gl_reply:8;	/* Last reply from the dlm */

        unsigned int gl_hash;
        unsigned long gl_demote_time;	/* time of first demote request */
        long gl_hold_time;
        struct list_head gl_holders;

        const struct gfs2_glock_operations *gl_ops;
        ktime_t gl_dstamp;
        struct gfs2_lkstats gl_stats;
        struct dlm_lksb gl_lksb;
        unsigned long gl_tchange;
        void *gl_object;

        struct list_head gl_lru;
        struct list_head gl_ail_list;
        atomic_t gl_ail_count;
        atomic_t gl_revokes;
        struct delayed_work gl_work;
        union {
                /* For inode and iopen glocks only */
                struct work_struct gl_delete;
                /* For rgrp glocks only */
                struct {
                        loff_t start;
                        loff_t end;
                } gl_vm;
        };
        struct rcu_head gl_rcu;
};

#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */

enum {
        GIF_INVALID		= 0,
        GIF_QD_LOCKED		= 1,
        GIF_ALLOC_FAILED	= 2,
        GIF_SW_PAGED		= 3,
        GIF_ORDERED		= 4,
        GIF_FREE_VFS_INODE	= 5,
};

struct gfs2_inode {
        struct inode i_inode;
        u64 i_no_addr;
        u64 i_no_formal_ino;
        u64 i_generation;
        u64 i_eattr;
        unsigned long i_flags;		/* GIF_... */
        struct gfs2_glock *i_gl;	/* Move into i_gh? */
        struct gfs2_holder i_iopen_gh;
        struct gfs2_holder i_gh;	/* for prepare/commit_write only */
        struct gfs2_blkreserv *i_res;	/* rgrp multi-block reservation */
        struct gfs2_rgrpd *i_rgd;
        u64 i_goal;			/* goal block for allocations */
        struct rw_semaphore i_rw_mutex;
        struct list_head i_ordered;
        struct list_head i_trunc_list;
        __be64 *i_hash_cache;
        u32 i_entries;
        u32 i_diskflags;
        u8 i_height;
        u8 i_depth;
};

/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
        return container_of(inode, struct gfs2_inode, i_inode);
}

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
        return inode->i_sb->s_fs_info;
}

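/*
 * Example (illustrative only): because i_inode is embedded first, the VFS
 * inode and the GFS2 inode are two views of the same object, and the
 * superblock data can be reached from either:
 *
 *	struct gfs2_inode *ip = GFS2_I(inode);	(so &ip->i_inode == inode)
 *	struct gfs2_sbd *sdp = GFS2_SB(inode);	(same as GFS2_SB(&ip->i_inode))
 */
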
struct gfs2_file {
        struct mutex f_fl_mutex;
        struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
        struct list_head rr_list;
        u64 rr_blkno;
        unsigned int rr_where;
};

enum {
        QDF_CHANGE	= 1,
        QDF_LOCKED	= 2,
        QDF_REFRESH	= 3,
};

struct gfs2_quota_data {
        struct hlist_bl_node qd_hlist;
        struct list_head qd_list;
        struct kqid qd_id;
        struct gfs2_sbd *qd_sbd;
        struct lockref qd_lockref;
        struct list_head qd_lru;
        unsigned qd_hash;

        unsigned long qd_flags;		/* QDF_... */

        s64 qd_change;
        s64 qd_change_sync;

        unsigned int qd_slot;
        unsigned int qd_slot_count;

        struct buffer_head *qd_bh;
        struct gfs2_quota_change *qd_bh_qc;
        unsigned int qd_bh_count;

        struct gfs2_glock *qd_gl;
        struct gfs2_quota_lvb qd_qb;

        u64 qd_sync_gen;
        unsigned long qd_last_warn;
        struct rcu_head qd_rcu;
};

struct gfs2_trans {
        unsigned long tr_ip;

        unsigned int tr_blocks;
        unsigned int tr_revokes;
        unsigned int tr_reserved;
        unsigned int tr_touched:1;
        unsigned int tr_attached:1;
        unsigned int tr_alloced:1;

        unsigned int tr_num_buf_new;
        unsigned int tr_num_databuf_new;
        unsigned int tr_num_buf_rm;
        unsigned int tr_num_databuf_rm;
        unsigned int tr_num_revoke;
        unsigned int tr_num_revoke_rm;

        struct list_head tr_list;
        struct list_head tr_databuf;
        struct list_head tr_buf;

        unsigned int tr_first;
        struct list_head tr_ail1_list;
        struct list_head tr_ail2_list;
};

struct gfs2_journal_extent {
        struct list_head list;

        unsigned int lblock;	/* First logical block */
        u64 dblock;		/* First disk block */
        u64 blocks;
};

struct gfs2_jdesc {
        struct list_head jd_list;
        struct list_head extent_list;
        unsigned int nr_extents;
        struct work_struct jd_work;
        struct inode *jd_inode;
        unsigned long jd_flags;
#define JDF_RECOVERY 1
        unsigned int jd_jid;
        unsigned int jd_blocks;
        int jd_recover_error;
        /* Replay stuff */

        unsigned int jd_found_blocks;
        unsigned int jd_found_revokes;
        unsigned int jd_replayed_blocks;

        struct list_head jd_revoke_list;
        unsigned int jd_replay_tail;
};

struct gfs2_statfs_change_host {
        s64 sc_total;
        s64 sc_free;
        s64 sc_dinodes;
};

#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

#define GFS2_ERRORS_DEFAULT	GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW	0
#define GFS2_ERRORS_CONTINUE	1 /* place holder for future feature */
#define GFS2_ERRORS_RO		2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC	3

struct gfs2_args {
        char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
        char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
        char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
        unsigned int ar_spectator:1;		/* Don't get a journal */
        unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
        unsigned int ar_debug:1;		/* Oops on errors */
        unsigned int ar_posix_acl:1;		/* Enable posix acls */
        unsigned int ar_quota:2;		/* off/account/on */
        unsigned int ar_suiddir:1;		/* suiddir support */
        unsigned int ar_data:2;			/* ordered/writeback */
        unsigned int ar_meta:1;			/* mount metafs */
        unsigned int ar_discard:1;		/* discard requests */
        unsigned int ar_errors:2;		/* errors=withdraw | panic */
        unsigned int ar_nobarrier:1;		/* do not send barriers */
        unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
        int ar_commit;				/* Commit interval */
        int ar_statfs_quantum;			/* The fast statfs interval */
        int ar_quota_quantum;			/* The quota interval */
        int ar_statfs_percent;			/* The % change to force sync */
};

struct gfs2_tune {
        spinlock_t gt_spin;

        unsigned int gt_logd_secs;

        unsigned int gt_quota_warn_period;	/* Secs between quota warn msgs */
        unsigned int gt_quota_scale_num;	/* Numerator */
        unsigned int gt_quota_scale_den;	/* Denominator */
        unsigned int gt_quota_quantum;		/* Secs between syncs to quota file */
        unsigned int gt_new_files_jdata;
        unsigned int gt_max_readahead;		/* Max bytes to read-ahead from disk */
        unsigned int gt_complain_secs;
        unsigned int gt_statfs_quantum;
        unsigned int gt_statfs_slow;
};

enum {
        SDF_JOURNAL_CHECKED	= 0,
        SDF_JOURNAL_LIVE	= 1,
        SDF_SHUTDOWN		= 2,
        SDF_NOBARRIERS		= 3,
        SDF_NORECOVERY		= 4,
        SDF_DEMOTE		= 5,
        SDF_NOJOURNALID		= 6,
        SDF_RORECOVERY		= 7, /* read only recovery */
        SDF_SKIP_DLM_UNLOCK	= 8,
};

#define GFS2_FSNAME_LEN		256

struct gfs2_inum_host {
        u64 no_formal_ino;
        u64 no_addr;
};

struct gfs2_sb_host {
        u32 sb_magic;
        u32 sb_type;
        u32 sb_format;

        u32 sb_fs_format;
        u32 sb_multihost_format;
        u32 sb_bsize;
        u32 sb_bsize_shift;

        struct gfs2_inum_host sb_master_dir;
        struct gfs2_inum_host sb_root_dir;

        char sb_lockproto[GFS2_LOCKNAME_LEN];
        char sb_locktable[GFS2_LOCKNAME_LEN];
};

/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */

struct lm_lockstruct {
        int ls_jid;
        unsigned int ls_first;
        const struct lm_lockops *ls_ops;
        dlm_lockspace_t *ls_dlm;

        int ls_recover_jid_done;	/* These two are deprecated, */
        int ls_recover_jid_status;	/* used previously by gfs_controld */

        struct dlm_lksb ls_mounted_lksb;	/* mounted_lock */
        struct dlm_lksb ls_control_lksb;	/* control_lock */
        char ls_control_lvb[GDLM_LVB_SIZE];	/* control_lock lvb */
        struct completion ls_sync_wait;		/* {control,mounted}_{lock,unlock} */
        char *ls_lvb_bits;

        spinlock_t ls_recover_spin;	/* protects following fields */
        unsigned long ls_recover_flags;	/* DFL_ */
        uint32_t ls_recover_mount;	/* gen in first recover_done cb */
        uint32_t ls_recover_start;	/* gen in last recover_done cb */
        uint32_t ls_recover_block;	/* copy recover_start in last recover_prep */
        uint32_t ls_recover_size;	/* size of recover_submit, recover_result */
        uint32_t *ls_recover_submit;	/* gen in last recover_slot cb per jid */
        uint32_t *ls_recover_result;	/* result of last jid recovery */
};

struct gfs2_pcpu_lkstats {
        /* One struct for each glock type */
        struct gfs2_lkstats lkstats[10];
};

struct gfs2_sbd {
        struct super_block *sd_vfs;
        struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
        struct kobject sd_kobj;
        unsigned long sd_flags;		/* SDF_... */
        struct gfs2_sb_host sd_sb;

        /* Constants computed on mount */

        u32 sd_fsb2bb;
        u32 sd_fsb2bb_shift;
        u32 sd_diptrs;			/* Number of pointers in a dinode */
        u32 sd_inptrs;			/* Number of pointers in an indirect block */
        u32 sd_jbsize;			/* Size of a journaled data block */
        u32 sd_hash_bsize;		/* sizeof(exhash block) */
        u32 sd_hash_bsize_shift;
        u32 sd_hash_ptrs;		/* Number of pointers in a hash block */
        u32 sd_qc_per_block;
        u32 sd_blocks_per_bitmap;
        u32 sd_max_dirres;		/* Max blocks needed to add a directory entry */
        u32 sd_max_height;		/* Max height of a file's metadata tree */
        u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
        u32 sd_max_jheight;		/* Max height of journaled file's meta tree */
        u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];

        struct gfs2_args sd_args;	/* Mount arguments */
        struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

        /* Lock Stuff */

        struct lm_lockstruct sd_lockstruct;
        struct gfs2_holder sd_live_gh;
        struct gfs2_glock *sd_rename_gl;
        struct gfs2_glock *sd_freeze_gl;
        wait_queue_head_t sd_glock_wait;
        atomic_t sd_glock_disposal;
        struct completion sd_locking_init;
        struct completion sd_wdack;
        struct delayed_work sd_control_work;

        /* Inode Stuff */

        struct dentry *sd_master_dir;
        struct dentry *sd_root_dir;

        struct inode *sd_jindex;
        struct inode *sd_statfs_inode;
        struct inode *sd_sc_inode;
        struct inode *sd_qc_inode;
        struct inode *sd_rindex;
        struct inode *sd_quota_inode;

        /* StatFS stuff */

        spinlock_t sd_statfs_spin;
        struct gfs2_statfs_change_host sd_statfs_master;
        struct gfs2_statfs_change_host sd_statfs_local;
        int sd_statfs_force_sync;

        /* Resource group stuff */

        int sd_rindex_uptodate;
        spinlock_t sd_rindex_spin;
        struct rb_root sd_rindex_tree;
        unsigned int sd_rgrps;
        unsigned int sd_max_rg_data;

        /* Journal index stuff */

        struct list_head sd_jindex_list;
        spinlock_t sd_jindex_spin;
        struct mutex sd_jindex_mutex;
        unsigned int sd_journals;

        struct gfs2_jdesc *sd_jdesc;
        struct gfs2_holder sd_journal_gh;
        struct gfs2_holder sd_jinode_gh;

        struct gfs2_holder sd_sc_gh;
        struct gfs2_holder sd_qc_gh;

        struct completion sd_journal_ready;

        /* Daemon stuff */

        struct task_struct *sd_logd_process;
        struct task_struct *sd_quotad_process;

        /* Quota stuff */

        struct list_head sd_quota_list;
        atomic_t sd_quota_count;
        struct mutex sd_quota_mutex;
        struct mutex sd_quota_sync_mutex;
        wait_queue_head_t sd_quota_wait;
        struct list_head sd_trunc_list;
        spinlock_t sd_trunc_lock;

        unsigned int sd_quota_slots;
        unsigned long *sd_quota_bitmap;
        spinlock_t sd_bitmap_lock;

        u64 sd_quota_sync_gen;

        /* Log stuff */

        struct address_space sd_aspace;

        spinlock_t sd_log_lock;

        struct gfs2_trans *sd_log_tr;
        unsigned int sd_log_blks_reserved;
        int sd_log_commited_revoke;

        atomic_t sd_log_pinned;
        unsigned int sd_log_num_revoke;

        struct list_head sd_log_le_revoke;
        struct list_head sd_log_le_ordered;
        spinlock_t sd_ordered_lock;

        atomic_t sd_log_thresh1;
        atomic_t sd_log_thresh2;
        atomic_t sd_log_blks_free;
        wait_queue_head_t sd_log_waitq;
        wait_queue_head_t sd_logd_waitq;

        u64 sd_log_sequence;
        unsigned int sd_log_head;
        unsigned int sd_log_tail;
        int sd_log_idle;

        struct rw_semaphore sd_log_flush_lock;
        atomic_t sd_log_in_flight;
        struct bio *sd_log_bio;
        wait_queue_head_t sd_log_flush_wait;
        int sd_log_error;

        unsigned int sd_log_flush_head;
        u64 sd_log_flush_wrapped;

        spinlock_t sd_ail_lock;
        struct list_head sd_ail1_list;
        struct list_head sd_ail2_list;

        /* For quiescing the filesystem */
        struct gfs2_holder sd_freeze_gh;
        struct gfs2_holder sd_freeze_root_gh;
        struct gfs2_holder sd_thaw_gh;
        atomic_t sd_log_freeze;
        atomic_t sd_frozen_root;
        wait_queue_head_t sd_frozen_root_wait;
        wait_queue_head_t sd_log_frozen_wait;

        char sd_fsname[GFS2_FSNAME_LEN];
        char sd_table_name[GFS2_FSNAME_LEN];
        char sd_proto_name[GFS2_FSNAME_LEN];

        /* Debugging crud */

        unsigned long sd_last_warning;
        struct dentry *debugfs_dir;	/* debugfs directory */
        struct dentry *debugfs_dentry_glocks;
        struct dentry *debugfs_dentry_glstats;
        struct dentry *debugfs_dentry_sbstats;
};

static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
        gl->gl_stats.stats[which]++;
}

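/*
 * The per-cpu counters below are bumped with preemption disabled so that
 * the CPU (and hence the pointer returned by this_cpu_ptr()) cannot change
 * between the lookup and the increment.
 */
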
static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
        const struct gfs2_sbd *sdp = gl->gl_sbd;
        preempt_disable();
        this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
        preempt_enable();
}

#endif /* __INCORE_DOT_H__ */