Blame view
include/linux/buffer_head.h
10.9 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
/* * include/linux/buffer_head.h * * Everything to do with buffer_heads. */ #ifndef _LINUX_BUFFER_HEAD_H #define _LINUX_BUFFER_HEAD_H #include <linux/types.h> #include <linux/fs.h> #include <linux/linkage.h> #include <linux/pagemap.h> #include <linux/wait.h> #include <asm/atomic.h> |
9361401eb
|
16 |
#ifdef CONFIG_BLOCK |
1da177e4c
|
17 18 19 20 21 |
/*
 * Per-buffer state bits kept in buffer_head.b_state.  Accessor functions
 * (buffer_foo()/set_buffer_foo()/...) are generated by BUFFER_FNS below.
 */
enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */
        BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
                          * IO completion of other buffers in the page
                          */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer Error printks to be quiet */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};

/* Worst-case buffers per page: one bh per 512-byte sector. */
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
/* I/O completion callback; uptodate is nonzero on success. */
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
205f87f6b
|
49 50 51 52 53 54 55 |
* Historically, a buffer_head was used to map a single block * within a page, and of course as the unit of I/O through the * filesystem and block layers. Nowadays the basic I/O unit * is the bio, and buffer_heads are used for extracting block * mappings (via a get_block_t call), for tracking state within * a page (via a page_mapping) and for wrapping bio submission * for backward compatibility reasons (e.g. submit_bh). |
1da177e4c
|
56 57 |
*/ struct buffer_head { |
1da177e4c
|
58 59 60 |
unsigned long b_state; /* buffer state bitmap (see above) */ struct buffer_head *b_this_page;/* circular list of page's buffers */ struct page *b_page; /* the page this bh is mapped to */ |
1da177e4c
|
61 |
|
205f87f6b
|
62 63 64 |
sector_t b_blocknr; /* start block number */ size_t b_size; /* size of mapping */ char *b_data; /* pointer to data within the page */ |
1da177e4c
|
65 66 67 68 69 |
struct block_device *b_bdev; bh_end_io_t *b_end_io; /* I/O completion */ void *b_private; /* reserved for b_end_io */ struct list_head b_assoc_buffers; /* associated with another mapping */ |
58ff407be
|
70 71 |
struct address_space *b_assoc_map; /* mapping this buffer is associated with */ |
205f87f6b
|
72 |
atomic_t b_count; /* users using this buffer_head */ |
1da177e4c
|
73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 |
}; /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. */ #define BUFFER_FNS(bit, name) \ static inline void set_buffer_##name(struct buffer_head *bh) \ { \ set_bit(BH_##bit, &(bh)->b_state); \ } \ static inline void clear_buffer_##name(struct buffer_head *bh) \ { \ clear_bit(BH_##bit, &(bh)->b_state); \ } \ static inline int buffer_##name(const struct buffer_head *bh) \ { \ return test_bit(BH_##bit, &(bh)->b_state); \ } /* * test_set_buffer_foo() and test_clear_buffer_foo() */ #define TAS_BUFFER_FNS(bit, name) \ static inline int test_set_buffer_##name(struct buffer_head *bh) \ { \ return test_and_set_bit(BH_##bit, &(bh)->b_state); \ } \ static inline int test_clear_buffer_##name(struct buffer_head *bh) \ { \ return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ } \ /* * Emit the buffer bitops functions. Note that there are also functions * of the form "mark_buffer_foo()". These are higher-level functions which * do something in addition to setting a b_state bit. */ BUFFER_FNS(Uptodate, uptodate) BUFFER_FNS(Dirty, dirty) TAS_BUFFER_FNS(Dirty, dirty) BUFFER_FNS(Lock, locked) |
1da177e4c
|
115 116 117 118 119 120 121 122 123 |
/* Remaining accessor emissions for the bh_state_bits flags above. */
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
1da177e4c
|
125 126 127 128 129 130 131 |
/* Byte offset of this bh's data within its page. */
#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)

#define touch_buffer(bh)        mark_page_accessed(bh->b_page)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                BUG_ON(!PagePrivate(page));                     \
                ((struct buffer_head *)page_private(page));     \
        })
#define page_has_buffers(page)  PagePrivate(page)

/*
 * Declarations
 */
b3c975286
|
140 |
void mark_buffer_dirty(struct buffer_head *bh); |
1da177e4c
|
141 142 143 144 145 146 147 148 149 150 |
void init_buffer(struct buffer_head *, bh_end_io_t *, void *); void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset); int try_to_free_buffers(struct page *); struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, int retry); void create_empty_buffers(struct page *, unsigned long, unsigned long b_state); void end_buffer_read_sync(struct buffer_head *bh, int uptodate); void end_buffer_write_sync(struct buffer_head *bh, int uptodate); |
35c80d5f4
|
151 |
void end_buffer_async_write(struct buffer_head *bh, int uptodate); |
1da177e4c
|
152 153 154 155 156 157 158 159 160 161 |
/* Things to do with buffers at mapping->private_list */ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); int inode_has_buffers(struct inode *); void invalidate_inode_buffers(struct inode *); int remove_inode_buffers(struct inode *inode); int sync_mapping_buffers(struct address_space *mapping); void unmap_underlying_metadata(struct block_device *bdev, sector_t block); void mark_buffer_async_write(struct buffer_head *bh); |
1da177e4c
|
162 163 |
void __wait_on_buffer(struct buffer_head *); wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); |
3991d3bd1
|
164 165 166 167 |
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned size); struct buffer_head *__getblk(struct block_device *bdev, sector_t block, unsigned size); |
1da177e4c
|
168 169 |
void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); |
3991d3bd1
|
170 171 |
void __breadahead(struct block_device *, sector_t block, unsigned int size); struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size); |
f9a14399a
|
172 |
void invalidate_bh_lrus(void); |
dd0fc66fb
|
173 |
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); |
1da177e4c
|
174 |
void free_buffer_head(struct buffer_head * bh); |
b3c975286
|
175 176 |
void unlock_buffer(struct buffer_head *bh); void __lock_buffer(struct buffer_head *bh); |
1da177e4c
|
177 178 |
void ll_rw_block(int, int, struct buffer_head * bh[]); int sync_dirty_buffer(struct buffer_head *bh); |
87e99511e
|
179 |
int __sync_dirty_buffer(struct buffer_head *bh, int rw); |
9cb569d60
|
180 |
void write_dirty_buffer(struct buffer_head *bh, int rw); |
1da177e4c
|
181 182 183 |
int submit_bh(int, struct buffer_head *); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize); |
389d1b083
|
184 185 |
int bh_uptodate_or_lock(struct buffer_head *bh); int bh_submit_read(struct buffer_head *bh); |
1da177e4c
|
186 187 188 189 190 191 192 |
extern int buffer_heads_over_limit; /* * Generic address_space_operations implementations for buffer_head-backed * address_spaces. */ |
2ff28e22b
|
193 |
void block_invalidatepage(struct page *page, unsigned long offset); |
1da177e4c
|
194 195 |
int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); |
35c80d5f4
|
196 197 |
int block_write_full_page_endio(struct page *page, get_block_t *get_block, struct writeback_control *wbc, bh_end_io_t *handler); |
1da177e4c
|
198 |
int block_read_full_page(struct page*, get_block_t*); |
8ab22b9ab
|
199 200 |
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, unsigned long from); |
155130a4f
|
201 202 |
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, get_block_t *get_block); |
6e1db88d5
|
203 204 |
int __block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block); |
afddba49d
|
205 206 207 208 209 210 211 |
int block_write_end(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page *, void *); int generic_write_end(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page *, void *); void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); |
89e107877
|
212 213 214 |
int cont_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t *, loff_t *); |
05eb0b51f
|
215 |
int generic_cont_expand_simple(struct inode *inode, loff_t size); |
1da177e4c
|
216 |
int block_commit_write(struct page *page, unsigned from, unsigned to); |
c2ec175c3
|
217 |
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, |
541716902
|
218 |
get_block_t get_block); |
3978d7179
|
219 |
void block_sync_page(struct page *); |
1da177e4c
|
220 |
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); |
1da177e4c
|
221 |
int block_truncate_page(struct address_space *, loff_t, get_block_t *); |
ea0f04e59
|
222 |
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, |
03158cd7e
|
223 224 225 226 227 |
struct page **, void **, get_block_t*); int nobh_write_end(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page *, void *); int nobh_truncate_page(struct address_space *, loff_t, get_block_t *); |
1da177e4c
|
228 229 |
int nobh_writepage(struct page *page, get_block_t *get_block, struct writeback_control *wbc); |
b6cd0b772
|
230 |
void buffer_init(void); |
1da177e4c
|
231 232 233 234 235 236 237 238 239 240 |
/* * inline definitions */ static inline void attach_page_buffers(struct page *page, struct buffer_head *head) { page_cache_get(page); SetPagePrivate(page); |
4c21e2f24
|
241 |
set_page_private(page, (unsigned long)head); |
1da177e4c
|
242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 |
} static inline void get_bh(struct buffer_head *bh) { atomic_inc(&bh->b_count); } static inline void put_bh(struct buffer_head *bh) { smp_mb__before_atomic_dec(); atomic_dec(&bh->b_count); } static inline void brelse(struct buffer_head *bh) { if (bh) __brelse(bh); } static inline void bforget(struct buffer_head *bh) { if (bh) __bforget(bh); } static inline struct buffer_head * sb_bread(struct super_block *sb, sector_t block) { return __bread(sb->s_bdev, block, sb->s_blocksize); } static inline void sb_breadahead(struct super_block *sb, sector_t block) { __breadahead(sb->s_bdev, block, sb->s_blocksize); } static inline struct buffer_head * sb_getblk(struct super_block *sb, sector_t block) { return __getblk(sb->s_bdev, block, sb->s_blocksize); } static inline struct buffer_head * sb_find_get_block(struct super_block *sb, sector_t block) { return __find_get_block(sb->s_bdev, block, sb->s_blocksize); } static inline void map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) { set_buffer_mapped(bh); bh->b_bdev = sb->s_bdev; bh->b_blocknr = block; |
b0cf2321c
|
297 |
bh->b_size = sb->s_blocksize; |
1da177e4c
|
298 |
} |
1da177e4c
|
299 300 301 |
/*
 * Wait for a buffer's I/O lock to be released.  May sleep, hence the
 * might_sleep() annotation; the locked check avoids the slow path when
 * the buffer is already unlocked.
 */
static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh))
                __wait_on_buffer(bh);
}
ca5de404f
|
305 306 |
static inline int trylock_buffer(struct buffer_head *bh) { |
51b07fc3c
|
307 |
return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state)); |
ca5de404f
|
308 |
} |
1da177e4c
|
309 310 311 |
/*
 * Take the buffer lock, sleeping in __lock_buffer() if it is contended.
 */
static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (!trylock_buffer(bh))
                __lock_buffer(bh);
}
cf9a2ae8d
|
315 |
extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

/*
 * No-op stubs so callers compile when the block layer is configured out.
 */
static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */