Blame view

block/blk-ioc.c 3.87 KB
86db1e297   Jens Axboe   block: continue l...
1
2
3
4
5
6
7
8
9
  /*
   * Functions related to io context handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
5a0e3ad6a   Tejun Heo   include cleanup: ...
10
  #include <linux/slab.h>
86db1e297   Jens Axboe   block: continue l...
11
12
13
14
15
16
17
18
19
20
  
  #include "blk.h"
  
  /*
   * For io context allocations
   */
  static struct kmem_cache *iocontext_cachep;
  
  static void cfq_dtor(struct io_context *ioc)
  {
ffc4e7595   Jens Axboe   cfq-iosched: add ...
21
22
23
24
25
26
27
  	if (!hlist_empty(&ioc->cic_list)) {
  		struct cfq_io_context *cic;
  
  		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
  								cic_list);
  		cic->dtor(ioc);
  	}
86db1e297   Jens Axboe   block: continue l...
28
29
30
31
32
33
34
35
36
37
  }
  
  /*
   * IO Context helper functions. put_io_context() returns 1 if there are no
   * more users of this io context, 0 otherwise.
   */
  int put_io_context(struct io_context *ioc)
  {
  	if (ioc == NULL)
  		return 1;
d9c7d394a   Nikanth Karthikesan   block: prevent po...
38
  	BUG_ON(atomic_long_read(&ioc->refcount) == 0);
86db1e297   Jens Axboe   block: continue l...
39

d9c7d394a   Nikanth Karthikesan   block: prevent po...
40
  	if (atomic_long_dec_and_test(&ioc->refcount)) {
86db1e297   Jens Axboe   block: continue l...
41
  		rcu_read_lock();
86db1e297   Jens Axboe   block: continue l...
42
  		cfq_dtor(ioc);
07416d29b   Jens Axboe   cfq-iosched: fix ...
43
  		rcu_read_unlock();
86db1e297   Jens Axboe   block: continue l...
44
45
46
47
48
49
50
51
52
53
  
  		kmem_cache_free(iocontext_cachep, ioc);
  		return 1;
  	}
  	return 0;
  }
  EXPORT_SYMBOL(put_io_context);
  
  static void cfq_exit(struct io_context *ioc)
  {
86db1e297   Jens Axboe   block: continue l...
54
  	rcu_read_lock();
86db1e297   Jens Axboe   block: continue l...
55

ffc4e7595   Jens Axboe   cfq-iosched: add ...
56
57
58
59
60
61
62
63
  	if (!hlist_empty(&ioc->cic_list)) {
  		struct cfq_io_context *cic;
  
  		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
  								cic_list);
  		cic->exit(ioc);
  	}
  	rcu_read_unlock();
86db1e297   Jens Axboe   block: continue l...
64
65
66
  }
  
  /* Called by the exitting task */
b69f22920   Louis Rilling   block: Fix io_con...
67
  void exit_io_context(struct task_struct *task)
86db1e297   Jens Axboe   block: continue l...
68
69
  {
  	struct io_context *ioc;
b69f22920   Louis Rilling   block: Fix io_con...
70
71
72
73
  	task_lock(task);
  	ioc = task->io_context;
  	task->io_context = NULL;
  	task_unlock(task);
86db1e297   Jens Axboe   block: continue l...
74
75
  
  	if (atomic_dec_and_test(&ioc->nr_tasks)) {
86db1e297   Jens Axboe   block: continue l...
76
  		cfq_exit(ioc);
86db1e297   Jens Axboe   block: continue l...
77
  	}
61cc74fbb   Louis Rilling   block: Fix io_con...
78
  	put_io_context(ioc);
86db1e297   Jens Axboe   block: continue l...
79
80
81
82
83
84
85
86
  }
  
  struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
  {
  	struct io_context *ret;
  
  	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
  	if (ret) {
d9c7d394a   Nikanth Karthikesan   block: prevent po...
87
  		atomic_long_set(&ret->refcount, 1);
86db1e297   Jens Axboe   block: continue l...
88
89
90
91
  		atomic_set(&ret->nr_tasks, 1);
  		spin_lock_init(&ret->lock);
  		ret->ioprio_changed = 0;
  		ret->ioprio = 0;
4671a1322   Richard Kennedy   block: don't acce...
92
  		ret->last_waited = 0; /* doesn't matter... */
86db1e297   Jens Axboe   block: continue l...
93
  		ret->nr_batch_requests = 0; /* because this is 0 */
86db1e297   Jens Axboe   block: continue l...
94
  		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
95
  		INIT_HLIST_HEAD(&ret->cic_list);
86db1e297   Jens Axboe   block: continue l...
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
  		ret->ioc_data = NULL;
  	}
  
  	return ret;
  }
  
  /*
   * If the current task has no IO context then create one and initialise it.
   * Otherwise, return its existing IO context.
   *
   * This returned IO context doesn't have a specifically elevated refcount,
   * but since the current task itself holds a reference, the context can be
   * used in general code, so long as it stays within `current` context.
   */
  struct io_context *current_io_context(gfp_t gfp_flags, int node)
  {
  	struct task_struct *tsk = current;
  	struct io_context *ret;
  
  	ret = tsk->io_context;
  	if (likely(ret))
  		return ret;
  
  	ret = alloc_io_context(gfp_flags, node);
  	if (ret) {
  		/* make sure set_task_ioprio() sees the settings above */
  		smp_wmb();
  		tsk->io_context = ret;
  	}
  
  	return ret;
  }
  
  /*
   * If the current task has no IO context then create one and initialise it.
   * If it does have a context, take a ref on it.
   *
   * This is always called in the context of the task which submitted the I/O.
   */
  struct io_context *get_io_context(gfp_t gfp_flags, int node)
  {
  	struct io_context *ret = NULL;
  
  	/*
  	 * Check for unlikely race with exiting task. ioc ref count is
  	 * zero when ioc is being detached.
  	 */
  	do {
  		ret = current_io_context(gfp_flags, node);
  		if (unlikely(!ret))
  			break;
d9c7d394a   Nikanth Karthikesan   block: prevent po...
147
  	} while (!atomic_long_inc_not_zero(&ret->refcount));
86db1e297   Jens Axboe   block: continue l...
148
149
150
151
152
153
154
155
156
157
158
  
  	return ret;
  }
  EXPORT_SYMBOL(get_io_context);
  
  void copy_io_context(struct io_context **pdst, struct io_context **psrc)
  {
  	struct io_context *src = *psrc;
  	struct io_context *dst = *pdst;
  
  	if (src) {
d9c7d394a   Nikanth Karthikesan   block: prevent po...
159
160
  		BUG_ON(atomic_long_read(&src->refcount) == 0);
  		atomic_long_inc(&src->refcount);
86db1e297   Jens Axboe   block: continue l...
161
162
163
164
165
  		put_io_context(dst);
  		*pdst = src;
  	}
  }
  EXPORT_SYMBOL(copy_io_context);
133415982   Adrian Bunk   make blk_ioc_init...
166
/* Boot-time setup: create the slab cache backing io_context allocations. */
static int __init blk_ioc_init(void)
{
	/* SLAB_PANIC: the kernel panics if the cache cannot be created. */
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);