block/blk.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT	(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

/*
 * @q->queue_lock is set while a queue is being initialized. Since we know
 * that no other threads access the queue object before @q->queue_lock has
 * been set, it is safe to manipulate queue flags without holding the
 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
 * blk_init_allocated_queue().
 */
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
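
/*
 * Usage sketch (illustrative, not part of the original header): outside of
 * queue initialization the locked helpers above must run under
 * q->queue_lock, while the _unlocked variants are only safe before
 * QUEUE_FLAG_INIT_DONE is set. The example_* name and the caller-supplied
 * flag are hypothetical.
 */
static inline void example_set_flag_locked(struct request_queue *q,
					   unsigned int flag)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(flag, q);	/* lockdep assertion is satisfied */
	spin_unlock_irqrestore(q->queue_lock, flags);
}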

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	if (q->mq_ops)
		return blk_mq_map_queue(q, ctx->cpu)->fq;
	return q->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}
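
/*
 * Contrast sketch (an assumption, not code from this file): callers that
 * are not already inside generic_make_request() must use blk_queue_enter(),
 * which can fail once the queue is frozen or dying, and must balance it
 * with blk_queue_exit(). The example_* name is hypothetical.
 */
static inline int example_enter_queue(struct request_queue *q)
{
	if (blk_queue_enter(q, 0))	/* fails if the queue is dying */
		return -ENODEV;
	/* ... safe to issue requests against @q here ... */
	blk_queue_exit(q);
	return 0;
}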

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}
#else
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
			       struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds. Steal the bottom bit of the
 * __deadline field for this.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(0, &rq->__deadline);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(0, &rq->__deadline);
}

static inline bool blk_rq_is_complete(struct request *rq)
{
	return test_bit(0, &rq->__deadline);
}
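
/*
 * Usage sketch (illustrative only): the timeout handler and the normal
 * completion path both race for the bit; whichever test_and_set_bit()
 * wins owns the request, so it can never be completed twice. The
 * example_* name is hypothetical.
 */
static inline bool example_try_own_completion(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return false;	/* the other side got there first */
	/* ... the caller now owns completion of @rq ... */
	return true;
}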

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_activate_req_fn)
		e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_deactivate_req_fn)
		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
			   const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested.
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
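
/*
 * Worked example (values assumed from blk_queue_congestion_threshold() in
 * blk-core.c): with the default nr_requests of 128, nr_congestion_on is
 * 128 - 128/8 + 1 = 113 and nr_congestion_off is 128 - 128/8 - 128/16 - 1
 * = 103; the ten-request gap provides the hysteresis mentioned above.
 */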

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
	       !blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Steal a bit from this field for legacy IO path atomic IO marking. Note that
 * setting the deadline clears the bottom bit, potentially clearing the
 * completed bit. The user has to be OK with this (current ones are fine).
 */
static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
{
	rq->__deadline = time & ~0x1UL;
}

static inline unsigned long blk_rq_deadline(struct request *rq)
{
	return rq->__deadline & ~0x1UL;
}
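
/*
 * Illustrative sketch (not part of the original header): because the store
 * above masks out bit 0, arming a deadline also clears the completed bit,
 * which is exactly the interplay the comment warns about. The example_*
 * name is hypothetical.
 */
static inline void example_arm_deadline(struct request *rq,
					unsigned long timeout)
{
	blk_rq_set_deadline(rq, jiffies + timeout);	/* clears bit 0 */
	WARN_ON_ONCE(blk_rq_is_complete(rq));		/* so this holds */
}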

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'; meanwhile it has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
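
/*
 * Worked example (assuming a 4096-byte logical block size):
 * round_down(4294967295, 4096) = 4294963200 bytes, i.e. 8388600 sectors
 * after the >> 9; with 512-byte blocks the result is 8388607 sectors.
 */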

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
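
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * legacy request allocation path running in process context might ensure
 * an io_context exists before consulting it. The example_* name is
 * hypothetical.
 */
static inline struct io_context *example_get_ioc(gfp_t gfp_mask)
{
	/* May still return NULL if the allocation failed. */
	return create_io_context(gfp_mask, NUMA_NO_NODE);
}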

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

extern void blk_drain_queue(struct request_queue *q);

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#endif /* BLK_INTERNAL_H */