block/blk-mq.h
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H
struct blk_mq_tag_set;
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);

/*
 * Basic implementation of sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;
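/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * how a consumer of struct blk_align_bitmap might spread a flat bit index
 * across the cacheline-aligned words, so that writers hammering nearby bits
 * touch different cachelines. The helper name sparse_bitmap_set() and the
 * bits_per_word parameter are hypothetical; the real in-tree users live in
 * blk-mq.c and the tag code.
 */
static inline void sparse_bitmap_set(struct blk_align_bitmap *map,
				     unsigned int bit,
				     unsigned int bits_per_word)
{
	/* each word sits in its own cacheline, so concurrent setters of
	 * bits in different words do not share a line */
	set_bit(bit % bits_per_word, &map[bit / bits_per_word].word);
}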
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
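/*
 * Illustrative only, not part of the original header: typical pairing of
 * blk_mq_get_ctx()/blk_mq_put_ctx(). Because blk_mq_get_ctx() calls
 * get_cpu(), preemption stays off until blk_mq_put_ctx(), so the ctx is
 * guaranteed to be the running CPU's software queue only inside that window;
 * afterwards the pointer remains valid (per-cpu ctx data is persistent) but
 * may no longer match the current CPU. example_count_merge() is a made-up
 * name for illustration, not an in-tree helper.
 */
static inline void example_count_merge(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	/* preemption off */

	ctx->rq_merged++;				/* per-cpu counter */

	blk_mq_put_ctx(ctx);				/* preemption back on */
}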
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx,
		struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}
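/*
 * Illustrative only, not part of the original header: how a caller might
 * package the allocation context with blk_mq_set_alloc_data() before handing
 * it to the request/tag allocation path in blk-mq.c. The GFP flags and the
 * reserved=false choice below are plain examples, and example_prep_alloc()
 * is a hypothetical name used only for this sketch.
 */
static inline void example_prep_alloc(struct request_queue *q,
				      struct blk_mq_ctx *ctx,
				      struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_alloc_data *data)
{
	/* non-blocking allocation from the regular (non-reserved) tag pool */
	blk_mq_set_alloc_data(data, q, GFP_ATOMIC, false, ctx, hctx);
}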
#endif