block/blk-ioc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
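
/*
 * Illustrative sketch only (not a caller in this file): code that needs
 * @ioc to stay alive across a non-atomic region pairs the refcount
 * helpers:
 *
 *	get_io_context(ioc);
 *	... use ioc ...
 *	put_io_context(ioc);
 */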
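
/*
 * RCU callback to free an icq.  @icq->__rcu_icq_cache records the
 * icq_cache because @icq->q may already be gone by the time this runs
 * (see ioc_destroy_icq()).
 */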
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq.  Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq.  Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
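
/*
 * Destroy all icqs on @icq_list, taking each icq's ioc lock around
 * ioc_destroy_icq().  Helper for ioc_clear_queue() below.
 */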
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

	if (q->mq_ops) {
		spin_unlock_irq(q->queue_lock);
		__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}
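
/*
 * Allocate and install a fresh io_context on @task.  Returns 0 if @task
 * ends up with an io_context (either the one allocated here or one
 * installed concurrently), -ENOMEM on allocation failure, and -EBUSY
 * when a non-current @task is exiting so nothing may be installed.
 */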
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
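
/*
 * Illustrative sketch only (@task, the GFP choice and NUMA_NO_NODE are
 * assumptions, not from this file):
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		... associate IO with @task's context ...
 *		put_io_context(ioc);
 *	}
 */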

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
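
/*
 * Illustrative sketch of the lookup-then-create pattern an elevator
 * might use when assigning an icq to a request (the locking context and
 * GFP_ATOMIC are assumptions):
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 */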

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
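
/* Boot-time setup: create the slab cache io_contexts are allocated from. */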
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);