Blame view
block/blk-mq-sched.h
2.63 KB
b24413180 License cleanup: ... |
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
bd166ef18 blk-mq-sched: add... |
2 3 4 5 6 |
#ifndef BLK_MQ_SCHED_H #define BLK_MQ_SCHED_H #include "blk-mq.h" #include "blk-mq-tag.h" |
bd166ef18 blk-mq-sched: add... |
7 8 |
void blk_mq_sched_free_hctx_data(struct request_queue *q, void (*exit)(struct blk_mq_hw_ctx *)); |
44e8c2bff blk-mq: refactor ... |
9 |
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio); |
bd166ef18 blk-mq-sched: add... |
10 11 |
void blk_mq_sched_request_inserted(struct request *rq); |
e4d750c97 block: free merge... |
12 13 |
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, struct request **merged_request); |
bd166ef18 blk-mq-sched: add... |
14 15 |
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); |
6353c0a03 block: mq-deadlin... |
16 |
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); |
6d8c6c0f9 blk-mq: Restart a... |
17 |
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); |
bd166ef18 blk-mq-sched: add... |
18 |
|
bd6737f1a blk-mq-sched: add... |
19 |
void blk_mq_sched_insert_request(struct request *rq, bool at_head, |
9e97d2951 blk-mq-sched: rem... |
20 |
bool run_queue, bool async); |
bd6737f1a blk-mq-sched: add... |
21 22 23 |
void blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx, struct list_head *list, bool run_queue_async); |
bd166ef18 blk-mq-sched: add... |
24 |
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx); |
bd166ef18 blk-mq-sched: add... |
25 |
|
6917ff0b5 blk-mq-sched: ref... |
26 |
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); |
54d5329d4 blk-mq-sched: fix... |
27 |
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); |
bd166ef18 blk-mq-sched: add... |
28 29 30 31 |
static inline bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) { |
9bddeb2a5 blk-mq: make per-... |
32 |
if (blk_queue_nomerges(q) || !bio_mergeable(bio)) |
bd166ef18 blk-mq-sched: add... |
33 34 35 36 |
return false; return __blk_mq_sched_bio_merge(q, bio); } |
bd166ef18 blk-mq-sched: add... |
37 38 39 40 41 42 43 44 45 46 47 |
/*
 * Ask the elevator whether @bio may be merged into @rq.
 *
 * With no elevator attached, or an elevator that does not implement
 * the ->allow_merge() hook, merging is always permitted.
 */
static inline bool blk_mq_sched_allow_merge(struct request_queue *q,
					    struct request *rq,
					    struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	/* No scheduler veto available: allow the merge. */
	if (!e || !e->type->ops.mq.allow_merge)
		return true;

	return e->type->ops.mq.allow_merge(q, rq, bio);
}
c05f8525f blk-mq-sched: mak... |
48 |
static inline void blk_mq_sched_completed_request(struct request *rq) |
bd166ef18 blk-mq-sched: add... |
49 |
{ |
c05f8525f blk-mq-sched: mak... |
50 |
struct elevator_queue *e = rq->q->elevator; |
bd166ef18 blk-mq-sched: add... |
51 52 |
if (e && e->type->ops.mq.completed_request) |
c05f8525f blk-mq-sched: mak... |
53 |
e->type->ops.mq.completed_request(rq); |
bd166ef18 blk-mq-sched: add... |
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 |
} static inline void blk_mq_sched_started_request(struct request *rq) { struct request_queue *q = rq->q; struct elevator_queue *e = q->elevator; if (e && e->type->ops.mq.started_request) e->type->ops.mq.started_request(rq); } static inline void blk_mq_sched_requeue_request(struct request *rq) { struct request_queue *q = rq->q; struct elevator_queue *e = q->elevator; if (e && e->type->ops.mq.requeue_request) e->type->ops.mq.requeue_request(rq); } static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) { struct elevator_queue *e = hctx->queue->elevator; if (e && e->type->ops.mq.has_work) return e->type->ops.mq.has_work(hctx); return false; } |
bd166ef18 blk-mq-sched: add... |
83 84 85 86 87 88 |
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) { return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); } #endif |