include/linux/blk_types.h
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H
#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

#ifdef CONFIG_BLOCK
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;
	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;
	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */
	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;
	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
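
/*
 * Illustrative sketch (not part of the original header): a minimal view of
 * how a submitter typically fills in the fields above. bio_alloc(),
 * bio_add_page(), submit_bio() and bio_put() are declared in <linux/bio.h>,
 * not here; example_end_io and the local variables are hypothetical.
 *
 *	static void example_end_io(struct bio *bio)
 *	{
 *		pr_info("bio done, error=%d\n", bio->bi_error);
 *		bio_put(bio);
 *	}
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);	// one inline bvec
 *	bio->bi_bdev = bdev;				// target block device
 *	bio->bi_iter.bi_sector = sector;		// starting sector
 *	bio_add_page(bio, page, PAGE_SIZE, 0);		// fills bi_io_vec/bi_vcnt
 *	bio->bi_end_io = example_end_io;		// completion callback
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);		// op/flags into bi_opf
 *	submit_bio(bio);
 */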
#define BIO_OP_SHIFT	(8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
#define bio_flags(bio)	((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)

#define bio_set_op_attrs(bio, op, op_flags) do {			\
	if (__builtin_constant_p(op))					\
		BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS));		\
	else								\
		WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS));		\
	if (__builtin_constant_p(op_flags))				\
		BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
	else								\
		WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
	(bio)->bi_opf = bio_flags(bio);					\
	(bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT);			\
	(bio)->bi_opf |= (op_flags);					\
} while (0)
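
/*
 * Illustrative sketch (not part of the original header): bi_opf packs the
 * operation into its top REQ_OP_BITS bits and request flags into the
 * remaining low bits. Assuming a 32-bit bi_opf and REQ_OP_BITS == 3,
 * BIO_OP_SHIFT evaluates to 29, so for example:
 *
 *	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_FUA);
 *	// bi_opf == (REQ_OP_WRITE << BIO_OP_SHIFT) | REQ_SYNC | REQ_FUA
 *	// bio_op(bio)    == REQ_OP_WRITE
 *	// bio_flags(bio) == REQ_SYNC | REQ_FUA
 */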
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10
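
/*
 * Illustrative sketch (not part of the original header): bio_reset() is
 * declared in <linux/bio.h>, not here, but conceptually it uses the two
 * constants above roughly as follows (simplified):
 *
 *	unsigned short saved = bio->bi_flags & ~((1U << BIO_RESET_BITS) - 1);
 *	memset(bio, 0, BIO_RESET_BYTES);	// clear fields before bi_max_vecs
 *	bio->bi_flags = saved;			// high bits, e.g. BVEC_POOL_IDX(), survive
 *
 * so bi_max_vecs, bi_io_vec, bi_pool and the flag bits at or above
 * BIO_RESET_BITS are preserved across a reset.
 */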

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
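
/*
 * Illustrative sketch (not part of the original header): the pool index is
 * stored biased by one in the top BVEC_POOL_BITS bits of the 16-bit
 * bi_flags, so BVEC_POOL_IDX() == 0 means there are no bvecs to free.
 * For a bio whose bvecs came from pool 2, roughly:
 *
 *	bio->bi_flags |= (2 + 1) << BVEC_POOL_OFFSET;	// store index + 1
 *	// BVEC_POOL_IDX(bio) == 3, real pool index == BVEC_POOL_IDX(bio) - 1
 */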

#endif /* CONFIG_BLOCK */

/*
 * Request flags.  For use in the cmd_flags field of struct request, and in
 * bi_opf of struct bio.  Note that some flags are only valid in either one.
 */
enum rq_flag_bits {
	/* common flags */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */

	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */

	/* bio only flags */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

	/* request only flags */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
				   for requests for which the SCSI "quiesce"
				   state must be ignored. */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_FLUSH_SEQ,	/* request for flush sequence */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_PM,		/* runtime pm request */
	__REQ_HASHED,		/* on IO scheduler merge hash */
	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
	__REQ_NR_BITS,		/* stops here */
};
#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
	 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK		REQ_COMMON_MASK

/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
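
/*
 * Illustrative sketch (not part of the original header): the masks above
 * are intended for bulk tests rather than per-bit checks, e.g. in
 * hypothetical callers:
 *
 *	if (bio->bi_opf & REQ_FAILFAST_MASK)
 *		;	// submitter asked to skip retries of any kind
 *	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
 *		return false;	// this request must not be merged with others
 */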

#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)

#define REQ_SORTED		(1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_STARTED		(1ULL << __REQ_STARTED)
#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
#define REQ_FAILED		(1ULL << __REQ_FAILED)
#define REQ_QUIET		(1ULL << __REQ_QUIET)
#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
#define REQ_PM			(1ULL << __REQ_PM)
#define REQ_HASHED		(1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)

enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,		/* request to discard sectors */
	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
	REQ_OP_WRITE_SAME,	/* write same block many times */
	REQ_OP_FLUSH,		/* request for cache flush */
};

#define REQ_OP_BITS 3
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
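
/*
 * Illustrative sketch (not part of the original header): a blk_qc_t cookie
 * packs the hardware queue number into the high 16 bits and the tag into
 * the low 16 bits, so the helpers above round-trip cleanly for values that
 * fit (hypothetical numbers):
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);	// == (3 << 16) | 42
 *	// blk_qc_t_valid(cookie)        == true
 *	// blk_qc_t_to_queue_num(cookie) == 3
 *	// blk_qc_t_to_tag(cookie)       == 42
 */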
#endif /* __LINUX_BLK_TYPES_H */ |