block/blk-ioc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
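
/*
 * RCU callback that actually frees an icq.  By the time this runs, all
 * RCU-protected lookups (see ioc_lookup_icq()) that could still see the
 * icq have drained; ioc_destroy_icq() recorded the owning elevator's
 * icq_cache in __rcu_icq_cache before the grace period started.
 */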
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
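
	/*
	 * Reverse-order double locking: ioc->lock is already held here,
	 * while queue_lock normally nests outside it (see
	 * ioc_create_icq()).  Trylock the queue_lock and, on failure,
	 * drop the ioc lock and retry so the two orders cannot deadlock.
	 */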
	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
				   &ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
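
	/*
	 * Free outside the ioc lock: the lock is embedded in the ioc
	 * itself, so it must be dropped before the memory is returned
	 * to the slab cache.
	 */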
	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
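
/*
 * Destroy all icqs on a list that ioc_clear_queue() has already spliced
 * off a request_queue's icq_list under queue_lock.  Each icq only needs
 * its ioc's lock here.
 */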
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}
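
/**
 * create_task_io_context - allocate and install an io_context for a task
 * @task: task the io_context belongs to
 * @gfp_flags: allocation flags for the io_context
 * @node: NUMA node to allocate from
 *
 * Returns 0 if @task ends up with an io_context (whether installed here
 * or concurrently by someone else), -ENOMEM on allocation failure, and
 * -EBUSY if @task is an exiting task other than %current.
 */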
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
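
/*
 * Example (a sketch, not a verbatim caller): elevators typically pair the
 * lookup above with creation when associating a request with an
 * io_context:
 *
 *	spin_lock_irq(&q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(&q->queue_lock);
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 */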

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If the icq doesn't exist,
 * it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
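
/*
 * Boot-time setup: create the slab cache that backs all io_context
 * allocations.  SLAB_PANIC means the kernel panics at boot if the
 * cache cannot be created.
 */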
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
					     sizeof(struct io_context), 0,
					     SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);