block/blk-mq.h

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef INT_BLK_MQ_H
  #define INT_BLK_MQ_H
  #include "blk-stat.h"
  #include "blk-mq-tag.h"

  struct blk_mq_tag_set;
  /**
   * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
   */
  struct blk_mq_ctx {
  	struct {
  		spinlock_t		lock;
  		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;
  
	unsigned int		cpu;		/* CPU this software queue serves */
	unsigned int		index_hw;	/* index of this ctx in hctx->ctxs[] */
  
  	/* incremented at dispatch time */
  	unsigned long		rq_dispatched[2];
  	unsigned long		rq_merged;
  
  	/* incremented at completion time */
  	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
  
  	struct request_queue	*queue;
  	struct kobject		kobj;
  } ____cacheline_aligned_in_smp;
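
/*
 * Illustrative sketch (not part of the original header): on the
 * submission side a request is queued onto the per-cpu software queue
 * under its lock, to be flushed to the hardware queue later, e.g. via
 * blk_mq_flush_busy_ctxs():
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 */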
  void blk_mq_freeze_queue(struct request_queue *q);
  void blk_mq_free_queue(struct request_queue *q);
  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
  void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget);
  void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
  bool blk_mq_get_driver_tag(struct request *rq);
  struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
  					struct blk_mq_ctx *start);
  
  /*
   * Internal helpers for allocating/freeing the request map
   */
  void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  		     unsigned int hctx_idx);
  void blk_mq_free_rq_map(struct blk_mq_tags *tags);
  struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
  					unsigned int hctx_idx,
  					unsigned int nr_tags,
  					unsigned int reserved_tags);
  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  		     unsigned int hctx_idx, unsigned int depth);
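
/*
 * Sketch of the expected pairing (illustrative; mirrors what callers in
 * blk-mq.c and blk-mq-sched.c do):
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, nr_tags, reserved_tags);
 *	if (!tags)
 *		return -ENOMEM;
 *	if (blk_mq_alloc_rqs(set, tags, hctx_idx, depth) < 0) {
 *		blk_mq_free_rq_map(tags);
 *		return -ENOMEM;
 *	}
 *
 * and, on teardown, blk_mq_free_rqs() followed by blk_mq_free_rq_map().
 */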
  
  /*
   * Internal helpers for request insertion into sw queues
   */
  void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  				bool at_head);
  void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
  void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
  				struct list_head *list);
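
/*
 * Illustrative distinction (sketch; the "bypass" condition is
 * hypothetical): __blk_mq_insert_request() queues a request onto its
 * software queue, while blk_mq_request_bypass_insert() puts it straight
 * onto the hardware dispatch list:
 *
 *	if (bypass)
 *		blk_mq_request_bypass_insert(rq, true);
 *	else
 *		__blk_mq_insert_request(hctx, rq, false);
 */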

  /* Used by blk_insert_cloned_request() to issue request directly */
  blk_status_t blk_mq_request_issue_directly(struct request *rq);
  void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
  				    struct list_head *list);

  /*
   * CPU -> queue mappings
   */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int index);

  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
  		int cpu)
  {
  	return q->queue_hw_ctx[q->mq_map[cpu]];
  }
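
/*
 * Example (illustrative): resolving the hardware queue for the CPU a
 * request was submitted on, as the driver-tag helpers below do:
 *
 *	struct blk_mq_hw_ctx *hctx =
 *		blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
 */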
  /*
   * sysfs helpers
   */
  extern void blk_mq_sysfs_init(struct request_queue *q);
  extern void blk_mq_sysfs_deinit(struct request_queue *q);
  extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
  extern int blk_mq_sysfs_register(struct request_queue *q);
  extern void blk_mq_sysfs_unregister(struct request_queue *q);
  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

  void blk_mq_release(struct request_queue *q);
  /**
   * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
   * @rq: target request.
   */
  static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
  {
  	return READ_ONCE(rq->state);
  }
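
/*
 * Example (illustrative): callers compare against the MQ_RQ_* states
 * from include/linux/blk-mq.h, e.g. to check whether a request has been
 * started but not yet completed:
 *
 *	if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
 *		...
 */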
  static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
  					   unsigned int cpu)
  {
  	return per_cpu_ptr(q->queue_ctx, cpu);
  }
  
  /*
   * This assumes per-cpu software queueing queues. They could be per-node
   * as well, for instance. For now this is hardcoded as-is. Note that we don't
   * care about preemption, since we know the ctx's are persistent. This does
   * mean that we can't rely on ctx always matching the currently running CPU.
   */
  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  {
  	return __blk_mq_get_ctx(q, get_cpu());
  }
  
  static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
  {
  	put_cpu();
  }
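
/*
 * blk_mq_get_ctx() and blk_mq_put_ctx() must be strictly paired, since
 * get_cpu() disables preemption until the matching put_cpu().
 * Illustrative pattern:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	...
 *	blk_mq_put_ctx(ctx);
 */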
  struct blk_mq_alloc_data {
  	/* input parameter */
  	struct request_queue *q;
  	blk_mq_req_flags_t flags;
  	unsigned int shallow_depth;
  
  	/* input & output parameter */
  	struct blk_mq_ctx *ctx;
  	struct blk_mq_hw_ctx *hctx;
  };
  static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
  {
  	if (data->flags & BLK_MQ_REQ_INTERNAL)
  		return data->hctx->sched_tags;
  	return data->hctx->tags;
  }
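
/*
 * Note: BLK_MQ_REQ_INTERNAL is set for allocations that go through an
 * I/O scheduler; such requests are accounted against the scheduler's
 * tag space (sched_tags) rather than the driver tags.
 */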
  static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
  {
  	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
  }
  static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
  {
  	return hctx->nr_ctx && hctx->tags;
  }
  void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
  		      unsigned int inflight[2]);
  void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
  			 unsigned int inflight[2]);
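
/*
 * Note: in the _rw variant the two counters are split by direction,
 * inflight[0] for reads and inflight[1] for writes.
 */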

  static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  
  	if (q->mq_ops->put_budget)
  		q->mq_ops->put_budget(hctx);
  }
  static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  
  	if (q->mq_ops->get_budget)
  		return q->mq_ops->get_budget(hctx);
  	return true;
  }
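
/*
 * Usage sketch (illustrative; this mirrors the dispatch paths in
 * blk-mq-sched.c): budget is taken before committing to a request and
 * returned if no request ends up being issued:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	rq = blk_mq_dequeue_from_ctx(hctx, ctx);
 *	if (!rq) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 */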
  static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
  					   struct request *rq)
  {
  	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
  	rq->tag = -1;
  
  	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
  		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
  		atomic_dec(&hctx->nr_active);
  	}
  }
  
  static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
  				       struct request *rq)
  {
  	if (rq->tag == -1 || rq->internal_tag == -1)
  		return;
  
  	__blk_mq_put_driver_tag(hctx, rq);
  }
  
  static inline void blk_mq_put_driver_tag(struct request *rq)
  {
  	struct blk_mq_hw_ctx *hctx;
  
  	if (rq->tag == -1 || rq->internal_tag == -1)
  		return;
  
  	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
  	__blk_mq_put_driver_tag(hctx, rq);
  }
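
/*
 * Note: rq->tag == -1 means no driver tag is held.  rq->internal_tag ==
 * -1 means the request was allocated without a scheduler, in which case
 * rq->tag lives for the request's whole lifetime and must not be
 * released here.
 */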
  static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
  {
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		set->mq_map[cpu] = 0;
  }
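
/*
 * Note (illustrative): clearing the map makes every CPU point at
 * hardware queue 0 until the map is rebuilt, e.g. by the driver's
 * map_queues callback.
 */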
  #endif