block/blk-mq.h

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef INT_BLK_MQ_H
  #define INT_BLK_MQ_H
  #include "blk-stat.h"
  #include "blk-mq-tag.h"

  struct blk_mq_tag_set;
  struct blk_mq_ctxs {
  	struct kobject kobj;
  	struct blk_mq_ctx __percpu	*queue_ctx;
  };
  /**
   * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
   */
  struct blk_mq_ctx {
  	struct {
  		spinlock_t		lock;
  		struct list_head	rq_lists[HCTX_MAX_TYPES];
  	} ____cacheline_aligned_in_smp;
  
  	unsigned int		cpu;
  	unsigned short		index_hw[HCTX_MAX_TYPES];
  	struct blk_mq_hw_ctx 	*hctxs[HCTX_MAX_TYPES];
  
  	/* incremented at dispatch time */
  	unsigned long		rq_dispatched[2];
  	unsigned long		rq_merged;
  
  	/* incremented at completion time */
  	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
  
  	struct request_queue	*queue;
  	struct blk_mq_ctxs      *ctxs;
  	struct kobject		kobj;
  } ____cacheline_aligned_in_smp;
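
/*
 * Illustrative note (not part of this header): for a fully mapped queue,
 * every possible CPU owns one blk_mq_ctx, and for each hctx type the ctx
 * records both the hardware queue it maps to and its slot within that
 * hardware queue's ctx table, so roughly:
 *
 *	struct blk_mq_hw_ctx *hctx = ctx->hctxs[type];
 *
 *	hctx->ctxs[ctx->index_hw[hctx->type]] == ctx;
 */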

  void blk_mq_exit_queue(struct request_queue *q);
  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
  void blk_mq_wake_waiters(struct request_queue *q);
  bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
  			     unsigned int);
  void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
  				bool kick_requeue_list);
  void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
  struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
  					struct blk_mq_ctx *start);
  void blk_mq_put_rq_ref(struct request *rq);
  
  /*
   * Internal helpers for allocating/freeing the request map
   */
  void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  		     unsigned int hctx_idx);
  void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
  struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
  					unsigned int hctx_idx,
  					unsigned int nr_tags,
  					unsigned int reserved_tags,
  					unsigned int flags);
  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  		     unsigned int hctx_idx, unsigned int depth);
  
  /*
   * Internal helpers for request insertion into sw queues
   */
  void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  				bool at_head);
  void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
  				  bool run_queue);
  void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
  				struct list_head *list);

  /* Used by blk_insert_cloned_request() to issue request directly */
  blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
  void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
  				    struct list_head *list);

  /*
   * CPU -> queue mappings
   */
  extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

  /*
   * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
   * @q: request queue
   * @type: the hctx type index
   * @cpu: CPU
   */
  static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
  							  enum hctx_type type,
  							  unsigned int cpu)
  {
  	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
  }
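
/*
 * Illustrative sketch (not part of this header): walking the default-type
 * mapping for every possible CPU of an initialized request queue "q".
 *
 *	unsigned int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		struct blk_mq_hw_ctx *hctx =
 *			blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, cpu);
 *
 *		... hctx services submissions from this CPU ...
 *	}
 */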
  /*
   * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
   * @q: request queue
   * @flags: request command flags
   * @ctx: software queue cpu ctx
   */
  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
  						     unsigned int flags,
  						     struct blk_mq_ctx *ctx)
  {
  	enum hctx_type type = HCTX_TYPE_DEFAULT;
  	/*
	 * The caller ensures that polling is enabled if REQ_HIPRI is set.
  	 */
  	if (flags & REQ_HIPRI)
  		type = HCTX_TYPE_POLL;
  	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
  		type = HCTX_TYPE_READ;
  	
  	return ctx->hctxs[type];
  }
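
/*
 * Illustrative sketch (not part of this header): with the submitting CPU's
 * ctx in hand, a plain read maps to the HCTX_TYPE_READ queue (ctx->hctxs[]
 * falls back to the default type when no dedicated read queues are
 * configured), while REQ_HIPRI selects HCTX_TYPE_POLL:
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, REQ_OP_READ, ctx);
 */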
  /*
   * sysfs helpers
   */
  extern void blk_mq_sysfs_init(struct request_queue *q);
  extern void blk_mq_sysfs_deinit(struct request_queue *q);
  extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
  extern int blk_mq_sysfs_register(struct request_queue *q);
  extern void blk_mq_sysfs_unregister(struct request_queue *q);
  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

  void blk_mq_cancel_work_sync(struct request_queue *q);
  void blk_mq_release(struct request_queue *q);
  static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
  					   unsigned int cpu)
  {
  	return per_cpu_ptr(q->queue_ctx, cpu);
  }
  
  /*
 * This assumes per-cpu software queues; they could just as well be per-node,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctxs are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
   */
  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  {
  	return __blk_mq_get_ctx(q, raw_smp_processor_id());
  }
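
/*
 * Illustrative sketch (not part of this header): the ctx is only per-cpu
 * cached state and requests record the ctx they were allocated from, so it
 * is harmless if the task migrates right after the lookup. A submission
 * path would do roughly:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 */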
  struct blk_mq_alloc_data {
  	/* input parameter */
  	struct request_queue *q;
  	blk_mq_req_flags_t flags;
  	unsigned int shallow_depth;
  	unsigned int cmd_flags;
  
  	/* input & output parameter */
  	struct blk_mq_ctx *ctx;
  	struct blk_mq_hw_ctx *hctx;
  };
  static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
  {
  	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
  }
  static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
  {
  	if (data->q->elevator)
  		return data->hctx->sched_tags;
  	return data->hctx->tags;
  }
  static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
  {
  	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
  }
  static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
  {
  	return hctx->nr_ctx && hctx->tags;
  }
  unsigned int blk_mq_in_flight(struct request_queue *q,
  		struct block_device *part);
  void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
  		unsigned int inflight[2]);

  static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
  					      int budget_token)
  {
  	if (q->mq_ops->put_budget)
  		q->mq_ops->put_budget(q, budget_token);
  }
  static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
  {
  	if (q->mq_ops->get_budget)
  		return q->mq_ops->get_budget(q);
  	return 0;
  }
  
  static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
  {
  	if (token < 0)
  		return;
  
  	if (rq->q->mq_ops->set_rq_budget_token)
  		rq->q->mq_ops->set_rq_budget_token(rq, token);
  }
  
  static inline int blk_mq_get_rq_budget_token(struct request *rq)
  {
  	if (rq->q->mq_ops->get_rq_budget_token)
  		return rq->q->mq_ops->get_rq_budget_token(rq);
  	return -1;
  }
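
/*
 * Illustrative sketch (not part of this header) of the budget token protocol
 * on the dispatch side: grab a token before handing the request to the
 * driver, stash it in the request, and give it back if dispatch does not go
 * ahead.
 *
 *	int token = blk_mq_get_dispatch_budget(q);
 *
 *	if (token < 0)
 *		return false;
 *	blk_mq_set_rq_budget_token(rq, token);
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(q, token);
 *		return false;
 *	}
 *	... ->queue_rq() owns the budget from here on ...
 */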
  static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
  {
  	if (blk_mq_is_sbitmap_shared(hctx->flags))
  		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
  	else
  		atomic_inc(&hctx->nr_active);
  }
  
  static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
  {
  	if (blk_mq_is_sbitmap_shared(hctx->flags))
  		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
  	else
  		atomic_dec(&hctx->nr_active);
  }
  
  static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
  {
  	if (blk_mq_is_sbitmap_shared(hctx->flags))
  		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
  	return atomic_read(&hctx->nr_active);
  }
  static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
  					   struct request *rq)
  {
  	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
  	rq->tag = BLK_MQ_NO_TAG;
  
  	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
  		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
  		__blk_mq_dec_active_requests(hctx);
  	}
  }
  
  static inline void blk_mq_put_driver_tag(struct request *rq)
  {
  	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
  		return;
  
  	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
  }
  bool blk_mq_get_driver_tag(struct request *rq);
  static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
  {
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		qmap->mq_map[cpu] = 0;
  }
  /*
   * blk_mq_plug() - Get caller context plug
   * @q: request queue
   * @bio : the bio being submitted by the caller context
   *
   * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This, however, can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO issuing
   * order. While this is not a problem with regular block devices, this ordering
   * change can cause write BIO failures with zoned block devices as these
   * require sequential write patterns to zones. Prevent this from happening by
   * ignoring the plug state of a BIO issuing context if the target request queue
   * is for a zoned block device and the BIO to plug is a write operation.
   *
   * Return current->plug if the bio can be plugged and NULL otherwise
   */
  static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
  					   struct bio *bio)
  {
  	/*
  	 * For regular block devices or read operations, use the context plug
  	 * which may be NULL if blk_start_plug() was not executed.
  	 */
  	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
  		return current->plug;
  
  	/* Zoned block device write operation case: do not plug the BIO */
  	return NULL;
  }
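
/*
 * Illustrative sketch (not part of this header): submission paths go through
 * blk_mq_plug() instead of reading current->plug directly, so plugging is
 * transparently bypassed for writes to zoned devices:
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug)
 *		... append the request to plug->mq_list ...
 *	else
 *		... insert or issue the request immediately ...
 */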
  /* Free all requests on the list */
  static inline void blk_mq_free_requests(struct list_head *list)
  {
  	while (!list_empty(list)) {
  		struct request *rq = list_entry_rq(list->next);
  
  		list_del_init(&rq->queuelist);
  		blk_mq_free_request(rq);
  	}
  }
  /*
   * For shared tag users, we track the number of currently active users
   * and attempt to provide a fair share of the tag depth for each of them.
   */
  static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
  				  struct sbitmap_queue *bt)
  {
  	unsigned int depth, users;
  
  	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
  		return true;
  
  	/*
  	 * Don't try dividing an ant
  	 */
  	if (bt->sb.depth == 1)
  		return true;
  	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
  		struct request_queue *q = hctx->queue;
  		struct blk_mq_tag_set *set = q->tag_set;
  		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
  			return true;
  		users = atomic_read(&set->active_queues_shared_sbitmap);
  	} else {
  		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
  			return true;
  		users = atomic_read(&hctx->tags->active_queues);
  	}
  	if (!users)
  		return true;
  
  	/*
  	 * Allow at least some tags
  	 */
  	depth = max((bt->sb.depth + users - 1) / users, 4U);
  	return __blk_mq_active_requests(hctx) < depth;
  }
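
/*
 * Worked example (illustrative): with bt->sb.depth == 256 and 8 active
 * queues, each queue may have up to max((256 + 7) / 8, 4) == 32 requests in
 * flight; with 128 active queues the 4-tag floor applies instead.
 */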
  #endif