block/blk-mq.h

/* SPDX-License-Identifier: GPL-2.0 */
  #ifndef INT_BLK_MQ_H
  #define INT_BLK_MQ_H
  #include "blk-stat.h"
  #include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
  	struct kobject kobj;
  	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
   * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
   */
  struct blk_mq_ctx {
  	struct {
  		spinlock_t		lock;
  		struct list_head	rq_lists[HCTX_MAX_TYPES];
  	} ____cacheline_aligned_in_smp;
  
  	unsigned int		cpu;
  	unsigned short		index_hw[HCTX_MAX_TYPES];
  	struct blk_mq_hw_ctx 	*hctxs[HCTX_MAX_TYPES];
  
  	/* incremented at dispatch time */
  	unsigned long		rq_dispatched[2];
  	unsigned long		rq_merged;
  
  	/* incremented at completion time */
  	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
  
  	struct request_queue	*queue;
  	struct blk_mq_ctxs      *ctxs;
  	struct kobject		kobj;
  } ____cacheline_aligned_in_smp;

  void blk_mq_exit_queue(struct request_queue *q);
  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
  void blk_mq_wake_waiters(struct request_queue *q);
  bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
  			     unsigned int);
  void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
  				bool kick_requeue_list);
  void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
  struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
  					struct blk_mq_ctx *start);
  
  /*
   * Internal helpers for allocating/freeing the request map
   */
  void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  		     unsigned int hctx_idx);
  void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
  struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
  					unsigned int hctx_idx,
  					unsigned int nr_tags,
  					unsigned int reserved_tags,
  					unsigned int flags);
  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  		     unsigned int hctx_idx, unsigned int depth);
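
/*
 * Illustrative sketch (not part of this header) of how these allocation
 * helpers are expected to pair up when building and tearing down the tag
 * map for one hardware queue; error handling is abbreviated:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags, set->flags);
 *	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth)) {
 *		blk_mq_free_rq_map(tags, set->flags);	// undo on failure
 *		tags = NULL;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);		// teardown order:
 *	blk_mq_free_rq_map(tags, set->flags);		// requests, then map
 */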
  
  /*
   * Internal helpers for request insertion into sw queues
   */
  void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  				bool at_head);
  void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
  				  bool run_queue);
  void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
  				struct list_head *list);

  /* Used by blk_insert_cloned_request() to issue request directly */
  blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
  void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
  				    struct list_head *list);

  /*
   * CPU -> queue mappings
   */
  extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

  /*
   * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
   * @q: request queue
   * @type: the hctx type index
   * @cpu: CPU
   */
  static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
  							  enum hctx_type type,
  							  unsigned int cpu)
  {
  	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
   * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
   * @q: request queue
   * @flags: request command flags
 * @ctx: software queue cpu ctx
   */
  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
  						     unsigned int flags,
  						     struct blk_mq_ctx *ctx)
  {
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_HIPRI is set.
	 */
	if (flags & REQ_HIPRI)
  		type = HCTX_TYPE_POLL;
  	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
  }
  /*
   * sysfs helpers
   */
  extern void blk_mq_sysfs_init(struct request_queue *q);
  extern void blk_mq_sysfs_deinit(struct request_queue *q);
  extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
  extern int blk_mq_sysfs_register(struct request_queue *q);
  extern void blk_mq_sysfs_unregister(struct request_queue *q);
  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
  					   unsigned int cpu)
  {
  	return per_cpu_ptr(q->queue_ctx, cpu);
  }
  
  /*
   * This assumes per-cpu software queueing queues. They could be per-node
   * as well, for instance. For now this is hardcoded as-is. Note that we don't
   * care about preemption, since we know the ctx's are persistent. This does
   * mean that we can't rely on ctx always matching the currently running CPU.
   */
  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  {
  	return __blk_mq_get_ctx(q, raw_smp_processor_id());
  }
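
/*
 * Rough usage sketch (illustrative, not part of this header): the request
 * allocation path is expected to pair the two helpers above, e.g.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * where "bio" stands in for whatever carries the command flags. Since
 * preemption is not disabled, the task may migrate afterwards; that is fine
 * because the ctx is persistent, it just may not match the CPU the code
 * ends up running on.
 */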
  struct blk_mq_alloc_data {
  	/* input parameter */
  	struct request_queue *q;
  	blk_mq_req_flags_t flags;
  	unsigned int shallow_depth;
  	unsigned int cmd_flags;
  
  	/* input & output parameter */
  	struct blk_mq_ctx *ctx;
  	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
  {
  	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
  {
  	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
  {
  	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
  {
  	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
  void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
  			 unsigned int inflight[2]);

  static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
  {
  	if (q->mq_ops->put_budget)
  		q->mq_ops->put_budget(q);
}

static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
  {
  	if (q->mq_ops->get_budget)
  		return q->mq_ops->get_budget(q);
  	return true;
  }
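
/*
 * Illustrative calling convention (a sketch, not part of this header;
 * pick_next_request() is a made-up placeholder): a dispatcher is expected
 * to take a budget before pulling work and to hand it back if it ends up
 * not dispatching anything:
 *
 *	if (!blk_mq_get_dispatch_budget(q))
 *		return;				// device is busy, try later
 *	rq = pick_next_request(hctx);
 *	if (!rq)
 *		blk_mq_put_dispatch_budget(q);	// nothing sent, return budget
 */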
  static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
  {
  	if (blk_mq_is_sbitmap_shared(hctx->flags))
  		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
  	else
  		atomic_inc(&hctx->nr_active);
  }
  
  static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
  {
  	if (blk_mq_is_sbitmap_shared(hctx->flags))
  		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
  	else
  		atomic_dec(&hctx->nr_active);
  }
  
  static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
  {
  	if (blk_mq_is_sbitmap_shared(hctx->flags))
  		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
  	return atomic_read(&hctx->nr_active);
  }
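
/*
 * These counters are meant to mirror driver-tag ownership on queues with
 * shared tags: the code that acquires a driver tag (e.g. blk_mq_get_driver_tag()
 * in blk-mq.c, which marks the request RQF_MQ_INFLIGHT) increments, and
 * __blk_mq_put_driver_tag() below decrements, so hctx_may_queue() can weigh
 * the count against each queue's fair share of the tag space.
 */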
  static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
  					   struct request *rq)
  {
  	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
  	rq->tag = BLK_MQ_NO_TAG;
  
  	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
  		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
  		__blk_mq_dec_active_requests(hctx);
  	}
  }
  
  static inline void blk_mq_put_driver_tag(struct request *rq)
  {
  	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
  		return;
  
  	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
  {
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		qmap->mq_map[cpu] = 0;
}

/*
   * blk_mq_plug() - Get caller context plug
   * @q: request queue
   * @bio : the bio being submitted by the caller context
   *
   * Plugging, by design, may delay the insertion of BIOs into the elevator in
   * order to increase BIO merging opportunities. This however can cause BIO
   * insertion order to change from the order in which submit_bio() is being
   * executed in the case of multiple contexts concurrently issuing BIOs to a
   * device, even if these context are synchronized to tightly control BIO issuing
   * order. While this is not a problem with regular block devices, this ordering
   * change can cause write BIO failures with zoned block devices as these
   * require sequential write patterns to zones. Prevent this from happening by
   * ignoring the plug state of a BIO issuing context if the target request queue
   * is for a zoned block device and the BIO to plug is a write operation.
   *
   * Return current->plug if the bio can be plugged and NULL otherwise
   */
  static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
  					   struct bio *bio)
  {
  	/*
  	 * For regular block devices or read operations, use the context plug
  	 * which may be NULL if blk_start_plug() was not executed.
  	 */
  	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
  		return current->plug;
  
  	/* Zoned block device write operation case: do not plug the BIO */
  	return NULL;
  }
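
/*
 * Sketch of the expected use in a submission path (illustrative only):
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug) {
 *		// queue the request on the plug list for batched insertion
 *	} else {
 *		// no plug (or a write to a zoned device): insert/issue now
 *	}
 */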
  /*
   * For shared tag users, we track the number of currently active users
   * and attempt to provide a fair share of the tag depth for each of them.
   */
  static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
  				  struct sbitmap_queue *bt)
  {
  	unsigned int depth, users;
  
  	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
  		return true;
  
  	/*
  	 * Don't try dividing an ant
  	 */
  	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
  		struct request_queue *q = hctx->queue;
  		struct blk_mq_tag_set *set = q->tag_set;
  
		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
  			return true;
  		users = atomic_read(&set->active_queues_shared_sbitmap);
  	} else {
  		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
  			return true;
  		users = atomic_read(&hctx->tags->active_queues);
	}

	if (!users)
  		return true;
  
  	/*
  	 * Allow at least some tags
  	 */
  	depth = max((bt->sb.depth + users - 1) / users, 4U);
  	return __blk_mq_active_requests(hctx) < depth;
  }
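
/*
 * Worked example of the fair-share arithmetic above (illustrative numbers):
 * with bt->sb.depth == 256 and users == 3, depth = max((256 + 3 - 1) / 3, 4U)
 * = 86, so this hctx may take another tag only while it has fewer than 86
 * active requests.
 */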
  #endif