/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/wait.h>
#include <asm/atomic.h>
enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Ordered,	/* ordered write */
	BH_Eopnotsupp,	/* operation not supported (barrier) */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
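/*
 * Illustrative only (not part of the original header): a minimal sketch of
 * what a bh_end_io_t completion handler looks like.  The handler runs when
 * I/O on the buffer finishes; "uptodate" reports whether the I/O succeeded.
 * The name example_end_io is hypothetical:
 *
 *	static void example_end_io(struct buffer_head *bh, int uptodate)
 *	{
 *		if (uptodate)
 *			set_buffer_uptodate(bh);
 *		else
 *			clear_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *	}
 *
 * A handler like this would be installed with
 * init_buffer(bh, example_end_io, private_data) before submitting the buffer.
 */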
/*
 * Keep related fields in common cachelines.  The most commonly accessed
 * field (b_state) goes at the start so the compiler does not generate
 * indexed addressing for it.
 */
struct buffer_head {
	/* First cache line: */
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */
	atomic_t b_count;		/* users using this block */
	u32 b_size;			/* block size */

	sector_t b_blocknr;		/* block number */
	char *b_data;			/* pointer to data block */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
};
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)						\
static inline void set_buffer_##name(struct buffer_head *bh)		\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline void clear_buffer_##name(struct buffer_head *bh)		\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline int buffer_##name(struct buffer_head *bh)		\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
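/*
 * Illustrative only: a sketch of what one invocation of the macro above
 * expands to.  For example, BUFFER_FNS(Uptodate, uptodate) generates
 * (roughly) the following three helpers:
 *
 *	static inline void set_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		set_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 *	static inline void clear_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 *	static inline int buffer_uptodate(struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 */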
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static inline int test_set_buffer_##name(struct buffer_head *bh)	\
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static inline int test_clear_buffer_##name(struct buffer_head *bh)	\
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}
/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
TAS_BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Ordered, ordered)
BUFFER_FNS(Eopnotsupp, eopnotsupp)
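/*
 * Illustrative only: the generated helpers operate purely on b_state.  A
 * caller that has modified a mapped, uptodate buffer normally goes through
 * the higher-level mark_buffer_dirty(), which also dirties the page and its
 * mapping, rather than calling set_buffer_dirty() and flipping the bit alone:
 *
 *	memcpy(bh->b_data, src, bh->b_size);	// copy in the new contents
 *	mark_buffer_dirty(bh);			// schedule it for writeback
 *
 * (the surrounding code and "src" are hypothetical).
 */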
#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)	mark_page_accessed(bh->b_page)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)(page)->private);	\
	})
#define page_has_buffers(page)	PagePrivate(page)
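/*
 * Illustrative only: a minimal sketch of walking a page's buffers.  The
 * buffers hang off page->private and are linked into a circular list via
 * b_this_page, so iteration stops when we come back around to the head:
 *
 *	struct buffer_head *head, *bh;
 *
 *	if (page_has_buffers(page)) {
 *		bh = head = page_buffers(page);
 *		do {
 *			// inspect or update one buffer_head here
 *			bh = bh->b_this_page;
 *		} while (bh != head);
 *	}
 *
 * Real callers normally hold the page locked (or another suitable reference)
 * while walking the list.
 */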
/*
 * Declarations
 */
void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *, int);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
void wake_up_buffer(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
void thaw_bdev(struct block_device *, struct super_block *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *, sector_t, int);
struct buffer_head *__getblk(struct block_device *, sector_t, int);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, int size);
struct buffer_head *__bread(struct block_device *, sector_t block, int size);
struct buffer_head *alloc_buffer_head(int gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void FASTCALL(__lock_buffer(struct buffer_head *bh));
void ll_rw_block(int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);

extern int buffer_heads_over_limit;
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
int try_to_release_page(struct page *page, int gfp_mask);
int block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int block_read_full_page(struct page *, get_block_t *);
int block_prepare_write(struct page *, unsigned, unsigned, get_block_t *);
int cont_prepare_write(struct page *, unsigned, unsigned, get_block_t *,
				loff_t *);
int generic_cont_expand(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
int nobh_prepare_write(struct page *, unsigned, unsigned, get_block_t *);
int nobh_commit_write(struct file *, struct page *, unsigned, unsigned);
int nobh_truncate_page(struct address_space *, loff_t);
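/*
 * Illustrative only: a sketch of how a simple block-based filesystem might
 * wire these generic helpers into its address_space_operations, passing its
 * own get_block callback.  The examplefs_* names are hypothetical:
 *
 *	static int examplefs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, examplefs_get_block);
 *	}
 *
 *	static struct address_space_operations examplefs_aops = {
 *		.readpage	= examplefs_readpage,
 *		.writepage	= examplefs_writepage,
 *		.prepare_write	= examplefs_prepare_write,
 *		.commit_write	= generic_commit_write,
 *		.sync_page	= block_sync_page,
 *	};
 *
 * examplefs_writepage and examplefs_prepare_write would similarly forward to
 * block_write_full_page() and block_prepare_write().
 */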
/*
 * inline definitions
 */
static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread(sb->s_bdev, block, sb->s_blocksize);
}
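/*
 * Illustrative only: the common read-use-release pattern built on the
 * helpers above.  sb_bread() returns a buffer with an elevated b_count (or
 * NULL on I/O error), and brelse() drops that reference when the caller is
 * done; "blocknr" and the surrounding code are hypothetical:
 *
 *	struct buffer_head *bh;
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	// bh->b_data now holds sb->s_blocksize bytes of on-disk data
 *	brelse(bh);
 */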
static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
}
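/*
 * Illustrative only: a minimal sketch of a get_block_t implementation using
 * map_bh().  The generic helpers above (block_read_full_page() and friends)
 * call this to translate a file block into a disk block; the lookup function
 * examplefs_block_to_disk() is hypothetical:
 *
 *	static int examplefs_get_block(struct inode *inode, sector_t iblock,
 *				       struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = examplefs_block_to_disk(inode, iblock, create);
 *
 *		if (!phys)
 *			return -EIO;
 *		map_bh(bh_result, inode->i_sb, phys);
 *		return 0;
 *	}
 */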
/*
 * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
 * __wait_on_buffer() just to trip a debug check.  Because debug code in
 * inline functions is bloaty.
 */
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
		__wait_on_buffer(bh);
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (test_set_buffer_locked(bh))
		__lock_buffer(bh);
}
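/*
 * Illustrative only: the classic synchronous metadata read built from the
 * locking helpers above, roughly what __bread() does internally.  The buffer
 * "bh" is assumed to have come from sb_getblk() and to be referenced:
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		unlock_buffer(bh);		// already valid, nothing to do
 *	} else {
 *		get_bh(bh);			// reference held across the I/O
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);		// sleeps until end_io unlocks it
 *		if (!buffer_uptodate(bh))
 *			goto io_error;		// hypothetical error label
 *	}
 */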
#endif /* _LINUX_BUFFER_HEAD_H */