block/blk-ioc.c
  /*
   * Functions related to io context handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
  #include <linux/slab.h>
  
  #include "blk.h"
  
  /*
   * For io context allocations
   */
  static struct kmem_cache *iocontext_cachep;
  /**
   * get_io_context - increment reference count to io_context
   * @ioc: io_context to get
   *
   * Increment reference count to @ioc.
   */
  void get_io_context(struct io_context *ioc)
  {
  	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
  	atomic_long_inc(&ioc->refcount);
  }
  EXPORT_SYMBOL(get_io_context);
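
/*
 * Illustrative sketch, not part of the original file: the expected
 * get/put pairing around a borrowed io_context.  example_hold_and_release()
 * is a hypothetical helper; it assumes no queue_lock is held, hence the
 * %NULL hint passed to put_io_context().
 */
static inline void example_hold_and_release(struct io_context *ioc)
{
	get_io_context(ioc);		/* take an extra reference */
	/* ... safely dereference ioc here ... */
	put_io_context(ioc, NULL);	/* drop the reference again */
}
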
  /*
   * Releasing ioc may nest into another put_io_context() leading to nested
   * fast path release.  As the ioc's can't be the same, this is okay but
   * makes lockdep whine.  Keep track of nesting and use it as subclass.
   */
  #ifdef CONFIG_LOCKDEP
  #define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
  #define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
  #define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
  #else
  #define ioc_release_depth(q)		0
  #define ioc_release_depth_inc(q)	do { } while (0)
  #define ioc_release_depth_dec(q)	do { } while (0)
  #endif
  static void icq_free_icq_rcu(struct rcu_head *head)
  {
  	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
  
  	kmem_cache_free(icq->__rcu_icq_cache, icq);
  }
  
  /*
   * Exit and free an icq.  Called with both ioc and q locked.
   */
  static void ioc_exit_icq(struct io_cq *icq)
  {
  	struct io_context *ioc = icq->ioc;
  	struct request_queue *q = icq->q;
  	struct elevator_type *et = q->elevator->type;
  
  	lockdep_assert_held(&ioc->lock);
  	lockdep_assert_held(q->queue_lock);
  
  	radix_tree_delete(&ioc->icq_tree, icq->q->id);
  	hlist_del_init(&icq->ioc_node);
  	list_del_init(&icq->q_node);
  
  	/*
  	 * Both setting lookup hint to and clearing it from @icq are done
  	 * under queue_lock.  If it's not pointing to @icq now, it never
  	 * will.  Hint assignment itself can race safely.
  	 */
  	if (rcu_dereference_raw(ioc->icq_hint) == icq)
  		rcu_assign_pointer(ioc->icq_hint, NULL);
  
  	if (et->ops.elevator_exit_icq_fn) {
  		ioc_release_depth_inc(q);
  		et->ops.elevator_exit_icq_fn(icq);
  		ioc_release_depth_dec(q);
  	}
  
  	/*
  	 * @icq->q might have gone away by the time RCU callback runs
  	 * making it impossible to determine icq_cache.  Record it in @icq.
  	 */
  	icq->__rcu_icq_cache = et->icq_cache;
  	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
  }
  /*
   * Slow path for ioc release in put_io_context().  Performs double-lock
   * dancing to unlink all icq's and then frees ioc.
   */
  static void ioc_release_fn(struct work_struct *work)
  {
  	struct io_context *ioc = container_of(work, struct io_context,
  					      release_work);
  	struct request_queue *last_q = NULL;
  
  	spin_lock_irq(&ioc->lock);
  	while (!hlist_empty(&ioc->icq_list)) {
  		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
  						struct io_cq, ioc_node);
  		struct request_queue *this_q = icq->q;
  
  		if (this_q != last_q) {
  			/*
  			 * Need to switch to @this_q.  Once we release
  			 * @ioc->lock, it can go away along with @cic.
  			 * Hold on to it.
  			 */
  			__blk_get_queue(this_q);
  
  			/*
  			 * blk_put_queue() might sleep thanks to kobject
  			 * idiocy.  Always release both locks, put and
  			 * restart.
  			 */
  			if (last_q) {
  				spin_unlock(last_q->queue_lock);
  				spin_unlock_irq(&ioc->lock);
  				blk_put_queue(last_q);
  			} else {
  				spin_unlock_irq(&ioc->lock);
  			}
  
  			last_q = this_q;
  			spin_lock_irq(this_q->queue_lock);
  			spin_lock(&ioc->lock);
  			continue;
  		}
  		ioc_exit_icq(icq);
  	}

  	if (last_q) {
  		spin_unlock(last_q->queue_lock);
  		spin_unlock_irq(&ioc->lock);
  		blk_put_queue(last_q);
  	} else {
  		spin_unlock_irq(&ioc->lock);
  	}
  
  	kmem_cache_free(iocontext_cachep, ioc);
  }
  /**
   * put_io_context - put a reference of io_context
   * @ioc: io_context to put
   * @locked_q: request_queue the caller is holding queue_lock of (hint)
   *
   * Decrement reference count of @ioc and release it if the count reaches
   * zero.  If the caller is holding queue_lock of a queue, it can indicate
   * that with @locked_q.  This is an optimization hint and the caller is
   * allowed to pass in %NULL even when it's holding a queue_lock.
   */
  void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
  {
  	struct request_queue *last_q = locked_q;
  	unsigned long flags;
  	if (ioc == NULL)
  		return;

  	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
  	if (locked_q)
  		lockdep_assert_held(locked_q->queue_lock);

  	if (!atomic_long_dec_and_test(&ioc->refcount))
  		return;

  	/*
  	 * Destroy @ioc.  This is a bit messy because icq's are chained
  	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
  	 * The inner ioc->lock should be held to walk our icq_list and then
  	 * for each icq the outer matching queue_lock should be grabbed.
  	 * ie. We need to do reverse-order double lock dancing.
  	 *
  	 * Another twist is that we are often called with one of the
  	 * matching queue_locks held as indicated by @locked_q, which
  	 * prevents performing double-lock dance for other queues.
  	 *
  	 * So, we do it in two stages.  The fast path uses the queue_lock
  	 * the caller is holding and, if other queues need to be accessed,
  	 * uses trylock to avoid introducing locking dependency.  This can
	 * handle most cases, especially if @ioc was performing IO on only
	 * a single device.
  	 *
  	 * If trylock doesn't cut it, we defer to @ioc->release_work which
  	 * can do all the double-locking dancing.
  	 */
  	spin_lock_irqsave_nested(&ioc->lock, flags,
  				 ioc_release_depth(locked_q));
  	while (!hlist_empty(&ioc->icq_list)) {
  		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
  						struct io_cq, ioc_node);
  		struct request_queue *this_q = icq->q;
  
  		if (this_q != last_q) {
  			if (last_q && last_q != locked_q)
  				spin_unlock(last_q->queue_lock);
  			last_q = NULL;
  
  			if (!spin_trylock(this_q->queue_lock))
  				break;
  			last_q = this_q;
  			continue;
  		}
  		ioc_exit_icq(icq);
  	}

  	if (last_q && last_q != locked_q)
  		spin_unlock(last_q->queue_lock);

  	spin_unlock_irqrestore(&ioc->lock, flags);

  	/* if no icq is left, we're done; otherwise, kick release_work */
  	if (hlist_empty(&ioc->icq_list))
  		kmem_cache_free(iocontext_cachep, ioc);
  	else
  		schedule_work(&ioc->release_work);
  }
  EXPORT_SYMBOL(put_io_context);
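
/*
 * Illustrative sketch, not part of the original file: dropping a reference
 * while already holding a queue_lock, using @locked_q as the fast path
 * hint.  example_put_under_qlock() is a hypothetical helper and assumes
 * the caller really does hold q->queue_lock.
 */
static inline void example_put_under_qlock(struct io_context *ioc,
					   struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	put_io_context(ioc, q);		/* fast path may reuse q->queue_lock */
}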

  /* Called by the exiting task */
  void exit_io_context(struct task_struct *task)
  {
  	struct io_context *ioc;
  	task_lock(task);
  	ioc = task->io_context;
  	task->io_context = NULL;
  	task_unlock(task);

  	atomic_dec(&ioc->nr_tasks);
  	put_io_context(ioc, NULL);
  }
  /**
   * ioc_clear_queue - break any ioc association with the specified queue
   * @q: request_queue being cleared
   *
   * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
   */
  void ioc_clear_queue(struct request_queue *q)
  {
  	lockdep_assert_held(q->queue_lock);
  
  	while (!list_empty(&q->icq_list)) {
  		struct io_cq *icq = list_entry(q->icq_list.next,
  					       struct io_cq, q_node);
  		struct io_context *ioc = icq->ioc;
  
  		spin_lock(&ioc->lock);
  		ioc_exit_icq(icq);
  		spin_unlock(&ioc->lock);
  	}
  }
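
/*
 * Illustrative sketch, not part of the original file: ioc_clear_queue()
 * expects q->queue_lock to be held, e.g. when a queue is being torn down
 * or its elevator switched.  example_clear_queue_icqs() is a hypothetical
 * wrapper showing that locking.
 */
static inline void example_clear_queue_icqs(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);
}
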
  void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
  				int node)
  {
  	struct io_context *ioc;

  	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
  				    node);
  	if (unlikely(!ioc))
  		return;
  
  	/* initialize */
  	atomic_long_set(&ioc->refcount, 1);
  	atomic_set(&ioc->nr_tasks, 1);
  	spin_lock_init(&ioc->lock);
  	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
  	INIT_HLIST_HEAD(&ioc->icq_list);
  	INIT_WORK(&ioc->release_work, ioc_release_fn);

  	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did so, or if @task, which isn't %current, is exiting.  Note
  	 * that we need to allow ioc creation on exiting %current as exit
  	 * path may issue IOs from e.g. exit_files().  The exit path is
  	 * responsible for not issuing IO after exit_io_context().
  	 */
  	task_lock(task);
  	if (!task->io_context &&
  	    (task == current || !(task->flags & PF_EXITING)))
  		task->io_context = ioc;
  	else
  		kmem_cache_free(iocontext_cachep, ioc);
  	task_unlock(task);
  }
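
/*
 * Illustrative sketch, not part of the original file: callers normally go
 * through the create_io_context() fast-path wrapper declared in blk.h,
 * which only drops into the slowpath above when @task has no io_context
 * yet.  example_ensure_ioc() is a hypothetical helper.
 */
static inline struct io_context *example_ensure_ioc(struct request_queue *q,
						    gfp_t gfp_mask)
{
	/* returns %current->io_context, allocating it if necessary */
	return create_io_context(current, gfp_mask, q->node);
}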

  /**
   * get_task_io_context - get io_context of a task
   * @task: task of interest
   * @gfp_flags: allocation flags, used if allocation is necessary
   * @node: allocation node, used if allocation is necessary
   *
   * Return io_context of @task.  If it doesn't exist, it is created with
   * @gfp_flags and @node.  The returned io_context has its reference count
   * incremented.
   *
   * This function always goes through task_lock() and it's better to use
   * %current->io_context + get_io_context() for %current.
   */
  struct io_context *get_task_io_context(struct task_struct *task,
  				       gfp_t gfp_flags, int node)
  {
  	struct io_context *ioc;

  	might_sleep_if(gfp_flags & __GFP_WAIT);
  	do {
  		task_lock(task);
  		ioc = task->io_context;
  		if (likely(ioc)) {
  			get_io_context(ioc);
  			task_unlock(task);
  			return ioc;
  		}
  		task_unlock(task);
  	} while (create_io_context(task, gfp_flags, node));

  	return NULL;
  }
  EXPORT_SYMBOL(get_task_io_context);
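
/*
 * Illustrative sketch, not part of the original file: taking a reference
 * on another task's io_context, allocating on the queue's node if it
 * doesn't exist yet.  example_with_task_ioc() is a hypothetical helper.
 */
static inline void example_with_task_ioc(struct task_struct *task,
					 struct request_queue *q)
{
	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_NOIO, q->node);
	if (!ioc)
		return;
	/* ... inspect ioc, e.g. read ioc->ioprio ... */
	put_io_context(ioc, NULL);
}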

  /**
   * ioc_lookup_icq - lookup io_cq from ioc
   * @ioc: the associated io_context
   * @q: the associated request_queue
   *
   * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
   * with @q->queue_lock held.
   */
  struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
  {
  	struct io_cq *icq;
  
  	lockdep_assert_held(q->queue_lock);
  
  	/*
  	 * icq's are indexed from @ioc using radix tree and hint pointer,
  	 * both of which are protected with RCU.  All removals are done
  	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
  	 */
  	rcu_read_lock();
  	icq = rcu_dereference(ioc->icq_hint);
  	if (icq && icq->q == q)
  		goto out;
  
  	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
  	if (icq && icq->q == q)
  		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
  	else
  		icq = NULL;
  out:
  	rcu_read_unlock();
  	return icq;
  }
  EXPORT_SYMBOL(ioc_lookup_icq);
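
/*
 * Illustrative sketch, not part of the original file: a lookup done under
 * q->queue_lock as the kernel-doc above requires.  Note that the returned
 * icq is only guaranteed to stay around while the lock is held.
 * example_find_icq() is a hypothetical helper.
 */
static inline struct io_cq *example_find_icq(struct io_context *ioc,
					     struct request_queue *q)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	return icq;
}
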
  /**
   * ioc_create_icq - create and link io_cq
   * @q: request_queue of interest
   * @gfp_mask: allocation mask
   *
 * Make sure io_cq linking %current->io_context and @q exists.  If the
 * io_context and/or the icq don't exist, they will be created using @gfp_mask.
   *
   * The caller is responsible for ensuring @ioc won't go away and @q is
   * alive and will stay alive until this function returns.
   */
  struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
  {
  	struct elevator_type *et = q->elevator->type;
  	struct io_context *ioc;
  	struct io_cq *icq;
  
  	/* allocate stuff */
  	ioc = create_io_context(current, gfp_mask, q->node);
  	if (!ioc)
  		return NULL;
  
  	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
  				    q->node);
  	if (!icq)
  		return NULL;
  
  	if (radix_tree_preload(gfp_mask) < 0) {
  		kmem_cache_free(et->icq_cache, icq);
  		return NULL;
  	}
  
  	icq->ioc = ioc;
  	icq->q = q;
  	INIT_LIST_HEAD(&icq->q_node);
  	INIT_HLIST_NODE(&icq->ioc_node);
  
  	/* lock both q and ioc and try to link @icq */
  	spin_lock_irq(q->queue_lock);
  	spin_lock(&ioc->lock);
  
  	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
  		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
  		list_add(&icq->q_node, &q->icq_list);
  		if (et->ops.elevator_init_icq_fn)
  			et->ops.elevator_init_icq_fn(icq);
  	} else {
  		kmem_cache_free(et->icq_cache, icq);
  		icq = ioc_lookup_icq(ioc, q);
  		if (!icq)
  			printk(KERN_ERR "cfq: icq link failed!
  ");
  	}
  
  	spin_unlock(&ioc->lock);
  	spin_unlock_irq(q->queue_lock);
  	radix_tree_preload_end();
  	return icq;
  }
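
/*
 * Illustrative sketch, not part of the original file: the lookup-or-create
 * pattern an elevator's request setup path might follow for %current.
 * example_get_icq() is a hypothetical helper.
 */
static inline struct io_cq *example_get_icq(struct request_queue *q,
					    gfp_t gfp_mask)
{
	struct io_context *ioc;
	struct io_cq *icq;

	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq)
		icq = ioc_create_icq(q, gfp_mask);	/* takes both locks itself */
	return icq;
}
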
  void ioc_set_changed(struct io_context *ioc, int which)
  {
  	struct io_cq *icq;
  	struct hlist_node *n;
  	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
  		set_bit(which, &icq->changed);
  }
  
  /**
   * ioc_ioprio_changed - notify ioprio change
   * @ioc: io_context of interest
   * @ioprio: new ioprio
   *
   * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
   * icq's.  iosched is responsible for checking the bit and applying it on
   * request issue path.
   */
  void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&ioc->lock, flags);
  	ioc->ioprio = ioprio;
  	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
  	spin_unlock_irqrestore(&ioc->lock, flags);
  }
  
  /**
   * ioc_cgroup_changed - notify cgroup change
   * @ioc: io_context of interest
   *
   * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
   * iosched is responsible for checking the bit and applying it on request
   * issue path.
   */
  void ioc_cgroup_changed(struct io_context *ioc)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&ioc->lock, flags);
  	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
  	spin_unlock_irqrestore(&ioc->lock, flags);
  }
  EXPORT_SYMBOL(ioc_cgroup_changed);
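
/*
 * Illustrative sketch, not part of the original file: how an ioprio update
 * might be propagated, roughly mirroring the set_task_ioprio() path.
 * example_notify_ioprio() is a hypothetical helper.
 */
static inline void example_notify_ioprio(struct task_struct *task, int ioprio)
{
	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc_ioprio_changed(ioc, ioprio);
		put_io_context(ioc, NULL);
	}
}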

  static int __init blk_ioc_init(void)
  {
  	iocontext_cachep = kmem_cache_create("blkdev_ioc",
  			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
  	return 0;
  }
  subsys_initcall(blk_ioc_init);