/* fs/xfs/xfs_buf.h */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */
#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }
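
/*
 * Editor's note (illustrative, not part of the original header): the
 * flag/string pairs in XFS_BUF_FLAGS are laid out for the tracing helpers,
 * so a tracepoint can decode b_flags in one step, e.g.:
 *
 *	__print_flags(bp->b_flags, "|", XFS_BUF_FLAGS)
 *
 * which would render XBF_READ | XBF_ASYNC as "READ|ASYNC".
 */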

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
} xfs_buftarg_t;
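
/*
 * Illustrative sketch (editor's example): the sectormask fields cache
 * "sectorsize - 1" so alignment checks reduce to a mask test; a direct I/O
 * request is rejected as misaligned for this target along these lines:
 *
 *	if ((pos | count) & target->bt_logical_sectormask)
 *		return -EINVAL;		// not logical-sector aligned
 */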

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
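
/*
 * Illustrative sketch (hypothetical names, editor's example): each on-disk
 * structure supplies one of these ops tables so the I/O path can verify
 * buffers as they pass through the cache, e.g.:
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.verify_read	= xfs_foo_read_verify,
 *		.verify_write	= xfs_foo_write_verify,
 *		.verify_struct	= xfs_foo_verify_struct,
 *	};
 */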

typedef struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sit on the first cacheline,
	 * which is the only part that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock, not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; once it exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed and
	 * the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means that we can change the retry timeout for
	 * buffers already under I/O and thus avoid getting stuck in a retry
	 * loop with a long timeout.
	 *
	 * b_last_error is used to ensure that we are getting repeated errors,
	 * not different errors. E.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens. (An
	 * illustrative sketch of how these fields interact follows this
	 * structure.)
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
} xfs_buf_t;
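
/*
 * Illustrative sketch (editor's assumption, not the kernel's actual error
 * handler): the retry fields above are consulted roughly as follows, with
 * max_retries/retry_timeout standing in for the configured limits:
 *
 *	if (bp->b_error != bp->b_last_error) {
 *		bp->b_retries = 0;		// new error class, start over
 *		bp->b_first_retry_time = jiffies;
 *		bp->b_last_error = bp->b_error;
 *	} else if (++bp->b_retries > max_retries ||
 *		   time_after(jiffies,
 *			      bp->b_first_retry_time + retry_timeout)) {
 *		// treat the write as permanently failed
 *	}
 */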

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}
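
/*
 * Illustrative usage (editor's sketch; xfs_sb_buf_ops stands in for
 * whichever verifier table matches the block being read):
 *
 *	struct xfs_buf *bp = xfs_buf_read(target, blkno, numblks, 0,
 *					  &xfs_sb_buf_ops);
 *	if (bp) {
 *		if (bp->b_error)	// I/O or verifier failure
 *			xfs_buf_ioerror_alert(bp, __func__);
 *		xfs_buf_relse(bp);	// unlock and drop the reference
 *	}
 */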

void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target,
				size_t numblks, int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)
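
/*
 * Illustrative sketch (editor's example): xfs_buf_trylock() follows the
 * usual kernel trylock convention of returning nonzero on success, so a
 * non-blocking path looks like:
 *
 *	if (!xfs_buf_trylock(bp))
 *		return -EAGAIN;		// would block, caller retries later
 *	...
 *	xfs_buf_unlock(bp);
 */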

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);

extern int __xfs_buf_submit(struct xfs_buf *bp, bool);

static inline int xfs_buf_submit(struct xfs_buf *bp)
{
	bool wait = bp->b_flags & XBF_ASYNC ? false : true;
	return __xfs_buf_submit(bp, wait);
}

extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
ce8e922c0
|
297 |
|
1da177e4c
|
298 |
/* Buffer Utility Routines */ |
88ee2df7f
|
299 |
extern void *xfs_buf_offset(struct xfs_buf *, size_t); |
5cfd28b6a
|
300 |
extern void xfs_buf_stale(struct xfs_buf *bp); |
1da177e4c
|
301 |
|

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
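
/*
 * Illustrative usage (editor's sketch): delwri queues are caller-owned
 * lists, so batched writeback is typically:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);	// repeat per buffer
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */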

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify
 * the IO map directly. Uncached buffers are not allowed to be
 * discontiguous, so this is safe to do.
 *
 * In the future, uncached buffers will pass the block number directly to
 * the IO request function and these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
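
/*
 * Illustrative sketch (editor's example): an uncached buffer reuses its
 * single map for each I/O, so callers retarget it before submission:
 *
 *	XFS_BUF_SET_ADDR(bp, daddr);	// point the map at the new disk addr
 *	error = xfs_bwrite(bp);
 */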

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
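
/*
 * Illustrative sketch (editor's example; XFS_FOO_CRC_OFF is a hypothetical
 * layout constant): a write verifier typically recomputes the on-disk CRC
 * as its final step:
 *
 *	static void xfs_foo_write_verify(struct xfs_buf *bp)
 *	{
 *		// ...structure checks, xfs_buf_ioerror() on failure...
 *		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 *	}
 */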

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */