block/blk-rq-qos.c

  // SPDX-License-Identifier: GPL-2.0

#include "blk-rq-qos.h"

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}
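
/*
 * rq_wait_inc_below() - try to account one more in-flight request.
 * E.g. with rq_wait->inflight == 3 and limit == 4 the counter is bumped
 * to 4 and true is returned; with inflight == 4 and limit == 4 nothing
 * is changed and false is returned.
 */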
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}
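
/*
 * Each of the __rq_qos_*() helpers below walks the queue's rq_qos chain
 * and invokes the corresponding op on every policy (e.g. wbt, iolatency)
 * that implements it, stopping at the end of the chain.
 */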
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}
  
  void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
  {
  	do {
  		if (rqos->ops->merge)
  			rqos->ops->merge(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
  {
  	do {
  		if (rqos->ops->queue_depth_changed)
  			rqos->ops->queue_depth_changed(rqos);
  		rqos = rqos->next;
  	} while (rqos);
  }
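
/*
 * The rq_depth helpers below implement the shared depth-scaling logic:
 * scale_step > 0 shrinks the allowed depth after latency spikes, while
 * scale_step < 0 temporarily raises it for write-only workloads.
 */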

/*
 * Return true if we can't increase the depth any further by scaling.
 */
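/*
 * Illustrative numbers (not from the source): with default_depth == 64,
 * queue_depth == 128 and scale_step == 2, the depth becomes
 * 1 + ((64 - 1) >> 2) == 16. With scale_step == -1 it becomes
 * 1 + ((64 - 1) << 1) == 127, which is capped at 3 * 128 / 4 == 96, in
 * which case we report that no further scaling up is possible.
 */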
  bool rq_depth_calc_max_depth(struct rq_depth *rqd)
  {
  	unsigned int depth;
  	bool ret = false;
  
  	/*
  	 * For QD=1 devices, this is a special case. It's important for those
  	 * to have one request ready when one completes, so force a depth of
  	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
  	 * since the device can't have more than that in flight. If we're
  	 * scaling down, then keep a setting of 1/1/1.
  	 */
  	if (rqd->queue_depth == 1) {
  		if (rqd->scale_step > 0)
  			rqd->max_depth = 1;
  		else {
  			rqd->max_depth = 2;
  			ret = true;
  		}
  	} else {
  		/*
  		 * scale_step == 0 is our default state. If we have suffered
  		 * latency spikes, step will be > 0, and we shrink the
  		 * allowed write depths. If step is < 0, we're only doing
  		 * writes, and we allow a temporarily higher depth to
  		 * increase performance.
  		 */
  		depth = min_t(unsigned int, rqd->default_depth,
  			      rqd->queue_depth);
  		if (rqd->scale_step > 0)
  			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
  		else if (rqd->scale_step < 0) {
  			unsigned int maxd = 3 * rqd->queue_depth / 4;
  
  			depth = 1 + ((depth - 1) << -rqd->scale_step);
  			if (depth > maxd) {
  				depth = maxd;
  				ret = true;
  			}
  		}
  
  		rqd->max_depth = depth;
  	}
  
  	return ret;
  }

/* Returns true on success and false if scaling up wasn't possible */
bool rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return false;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
	return true;
}
  
/*
 * Scale the depth down. If 'hard_throttle' is set, do it quicker, since
 * we had a latency violation. Returns true on success and false if
 * scaling down wasn't possible.
 */
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return false;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
	return true;
}
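
/*
 * Bookkeeping for a single exclusive waiter in rq_qos_wait(): the wait
 * queue entry embeds the custom wake function, while got_token records
 * whether a waker already acquired an inflight slot on our behalf.
 */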
  struct rq_qos_wait_data {
  	struct wait_queue_entry wq;
  	struct task_struct *task;
  	struct rq_wait *rqw;
  	acquire_inflight_cb_t *cb;
  	void *private_data;
  	bool got_token;
  };
  
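/*
 * Wake function for exclusive rqw waiters: the waiter is only dequeued
 * and woken once it owns an inflight token acquired via data->cb;
 * returning -1 stops __wake_up_common()'s scan when no budget is left.
 */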
  static int rq_qos_wake_function(struct wait_queue_entry *curr,
  				unsigned int mode, int wake_flags, void *key)
  {
  	struct rq_qos_wait_data *data = container_of(curr,
  						     struct rq_qos_wait_data,
  						     wq);
  
  	/*
  	 * If we fail to get a budget, return -1 to interrupt the wake up loop
  	 * in __wake_up_common.
  	 */
  	if (!data->cb(data->rqw, data->private_data))
  		return -1;
  
  	data->got_token = true;
	smp_wmb();
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}
  
  /**
   * rq_qos_wait - throttle on a rqw if we need to
   * @rqw: rqw to throttle on
   * @private_data: caller provided specific data
   * @acquire_inflight_cb: inc the rqw->inflight counter if we can
   * @cleanup_cb: the callback to cleanup in case we race with a waker
   *
   * This provides a uniform place for the rq_qos users to do their throttling.
   * Since you can end up with a lot of things sleeping at once, this manages the
 * waking up based on the resources available. The acquire_inflight_cb should
 * increment rqw->inflight if it is able to do so, or return false if not, in
 * which case we sleep until room becomes available.
   *
 * cleanup_cb is for the case where we race with a waker and need to clean up
 * the inflight count accordingly.
   */
  void rq_qos_wait(struct rq_wait *rqw, void *private_data,
  		 acquire_inflight_cb_t *acquire_inflight_cb,
  		 cleanup_cb_t *cleanup_cb)
  {
  	struct rq_qos_wait_data data = {
  		.wq = {
  			.func	= rq_qos_wake_function,
  			.entry	= LIST_HEAD_INIT(data.wq.entry),
  		},
  		.task = current,
  		.rqw = rqw,
  		.cb = acquire_inflight_cb,
  		.private_data = private_data,
  	};
  	bool has_sleeper;
  
  	has_sleeper = wq_has_sleeper(&rqw->wait);
  	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
  		return;
  
  	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	has_sleeper = !wq_has_single_sleeper(&rqw->wait);
	do {
		/* The memory barrier in set_current_state() saves us here. */
  		if (data.got_token)
  			break;
  		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
  			finish_wait(&rqw->wait, &data.wq);
  
			/*
			 * We raced with rq_qos_wake_function() getting a token,
			 * which means we now have two. Put our local token
			 * and wake anyone else potentially waiting for one.
			 */
			smp_rmb();
  			if (data.got_token)
  				cleanup_cb(rqw, private_data);
  			break;
  		}
  		io_schedule();
		has_sleeper = true;
		set_current_state(TASK_UNINTERRUPTIBLE);
	} while (1);
	finish_wait(&rqw->wait, &data.wq);
}
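
/*
 * Illustrative sketch (not part of this file; my_inflight_cb(),
 * my_cleanup_cb() and my_limit() are made-up stand-ins): a policy's
 * ->throttle() hook typically wraps rq_qos_wait() along these lines:
 *
 *	static bool my_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, my_limit(private_data));
 *	}
 *
 *	static void my_cleanup_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		wake_up_all(&rqw->wait);
 *	}
 *
 *	rq_qos_wait(rqw, private_data, my_inflight_cb, my_cleanup_cb);
 */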
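
/*
 * Tear down all registered rq_qos policies for @q: unregister the debugfs
 * entries, then unlink each policy from the head of the chain and call
 * its ->exit() hook.
 */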
  void rq_qos_exit(struct request_queue *q)
  {
  	blk_mq_debugfs_unregister_queue_rqos(q);

	while (q->rq_qos) {
  		struct rq_qos *rqos = q->rq_qos;
  		q->rq_qos = rqos->next;
  		rqos->ops->exit(rqos);
  	}
  }