block/blk-mq.h
  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef INT_BLK_MQ_H
  #define INT_BLK_MQ_H
  #include "blk-stat.h"
  #include "blk-mq-tag.h"

  struct blk_mq_tag_set;
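
/*
 * Wrapper around the per-CPU software queue contexts. The embedded kobject
 * ties the lifetime of the percpu queue_ctx array to the "mq" sysfs
 * directory rather than to the request_queue itself.
 */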
  struct blk_mq_ctxs {
  	struct kobject kobj;
  	struct blk_mq_ctx __percpu	*queue_ctx;
  };
  /**
   * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
   */
  struct blk_mq_ctx {
  	struct {
  		spinlock_t		lock;
  		struct list_head	rq_lists[HCTX_MAX_TYPES];
  	} ____cacheline_aligned_in_smp;
  
  	unsigned int		cpu;
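	/* this ctx's index in each mapped hctx's ->ctxs[], per hctx type */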
  	unsigned short		index_hw[HCTX_MAX_TYPES];
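	/* hardware queue servicing this CPU, one per hctx type */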
  	struct blk_mq_hw_ctx 	*hctxs[HCTX_MAX_TYPES];
  
  	/* incremented at dispatch time */
  	unsigned long		rq_dispatched[2];
  	unsigned long		rq_merged;
  
  	/* incremented at completion time */
  	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
  
  	struct request_queue	*queue;
  	struct blk_mq_ctxs      *ctxs;
  	struct kobject		kobj;
  } ____cacheline_aligned_in_smp;

  void blk_mq_exit_queue(struct request_queue *q);
  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
  void blk_mq_wake_waiters(struct request_queue *q);
  bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
  void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
  				bool kick_requeue_list);
  void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
  bool blk_mq_get_driver_tag(struct request *rq);
  struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
  					struct blk_mq_ctx *start);
  
  /*
   * Internal helpers for allocating/freeing the request map
   */
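/*
 * blk_mq_alloc_rq_map() allocates the blk_mq_tags structure and its request
 * pointer arrays; blk_mq_alloc_rqs() then allocates the requests themselves
 * and gives the driver a chance to set up per-request data (->init_request()).
 */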
  void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  		     unsigned int hctx_idx);
  void blk_mq_free_rq_map(struct blk_mq_tags *tags);
  struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
  					unsigned int hctx_idx,
  					unsigned int nr_tags,
  					unsigned int reserved_tags);
  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  		     unsigned int hctx_idx, unsigned int depth);
  
  /*
   * Internal helpers for request insertion into sw queues
   */
  void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  				bool at_head);
  void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
  				  bool run_queue);
  void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
  				struct list_head *list);

  /* Used by blk_insert_cloned_request() to issue request directly */
  blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
  void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
  				    struct list_head *list);

  /*
   * CPU -> queue mappings
   */
  extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

  /*
   * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
   * @q: request queue
   * @type: the hctx type index
   * @cpu: CPU
   */
  static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
  							  enum hctx_type type,
  							  unsigned int cpu)
  {
  	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
  }
  /*
   * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
   * @q: request queue
   * @flags: request command flags
 * @ctx: software queue ctx of the submitting CPU
   */
  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
  						     unsigned int flags,
  						     struct blk_mq_ctx *ctx)
  {
  	enum hctx_type type = HCTX_TYPE_DEFAULT;
	/*
	 * The caller ensures that polling is enabled when REQ_HIPRI is set.
	 */
  	if (flags & REQ_HIPRI)
  		type = HCTX_TYPE_POLL;
  	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
  		type = HCTX_TYPE_READ;
  	
  	return ctx->hctxs[type];
  }
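
/*
 * Example (sketch): the submission path picks the hardware queue for a bio
 * roughly as
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * so REQ_HIPRI I/O lands on HCTX_TYPE_POLL and reads may use HCTX_TYPE_READ
 * when the device exposes separate read queues.
 */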
  /*
   * sysfs helpers
   */
  extern void blk_mq_sysfs_init(struct request_queue *q);
  extern void blk_mq_sysfs_deinit(struct request_queue *q);
  extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
  extern int blk_mq_sysfs_register(struct request_queue *q);
  extern void blk_mq_sysfs_unregister(struct request_queue *q);
  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

  void blk_mq_release(struct request_queue *q);
  /**
   * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
   * @rq: target request.
   */
  static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
  {
  	return READ_ONCE(rq->state);
  }
  static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
  					   unsigned int cpu)
  {
  	return per_cpu_ptr(q->queue_ctx, cpu);
  }
  
/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctxs are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  {
  	return __blk_mq_get_ctx(q, raw_smp_processor_id());
  }
  struct blk_mq_alloc_data {
  	/* input parameter */
  	struct request_queue *q;
  	blk_mq_req_flags_t flags;
  	unsigned int shallow_depth;
  	unsigned int cmd_flags;
  
  	/* input & output parameter */
  	struct blk_mq_ctx *ctx;
  	struct blk_mq_hw_ctx *hctx;
  };
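
/*
 * When an I/O scheduler is attached, requests are allocated from the
 * per-hctx scheduler tags (BLK_MQ_REQ_INTERNAL); otherwise they come
 * straight from the driver tags.
 */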
  static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
  {
  	if (data->flags & BLK_MQ_REQ_INTERNAL)
  		return data->hctx->sched_tags;
  	return data->hctx->tags;
  }
  static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
  {
  	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
  }
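
/*
 * A hardware queue counts as mapped once at least one software queue points
 * at it and its tags have been allocated; the queue-running paths skip
 * unmapped queues.
 */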
  static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
  {
  	return hctx->nr_ctx && hctx->tags;
  }
  unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
  void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
  			 unsigned int inflight[2]);

  static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  
  	if (q->mq_ops->put_budget)
  		q->mq_ops->put_budget(hctx);
  }
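
/*
 * Dispatch budget hooks are optional blk_mq_ops callbacks (SCSI uses them to
 * account for host-wide resources); if a driver does not implement
 * ->get_budget(), the budget is always granted.
 */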
  static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  
  	if (q->mq_ops->get_budget)
  		return q->mq_ops->get_budget(hctx);
  	return true;
  }
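
/*
 * A driver tag is only held while the driver owns the request. With an I/O
 * scheduler the request keeps its internal_tag and must re-acquire a driver
 * tag (blk_mq_get_driver_tag()) before it is dispatched again.
 */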
  static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
  					   struct request *rq)
  {
  	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
  	rq->tag = -1;
  
  	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
  		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
  		atomic_dec(&hctx->nr_active);
  	}
  }
  static inline void blk_mq_put_driver_tag(struct request *rq)
  {
  	if (rq->tag == -1 || rq->internal_tag == -1)
  		return;
  	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
  }
  static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
  {
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		qmap->mq_map[cpu] = 0;
  }
/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise.
 */
  static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
  					   struct bio *bio)
  {
  	/*
  	 * For regular block devices or read operations, use the context plug
  	 * which may be NULL if blk_start_plug() was not executed.
  	 */
  	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
  		return current->plug;
  
  	/* Zoned block device write operation case: do not plug the BIO */
  	return NULL;
  }
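
/*
 * Example (sketch): the bio submission path reads the plug through this
 * helper instead of using current->plug directly, e.g.
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug)
 *		... attempt plug merging ...
 *
 * so that writes to zoned devices bypass plugging and keep their issue order.
 */
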
  #endif