Commit a3c3cac5d31879cd9ae2de7874dc6544ca704aec
1 parent 774d5f14ee
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
SUNRPC: Prevent an rpc_task wakeup race
The lockless RPC_IS_QUEUED() test in __rpc_execute means that we need to be
careful about ordering the calls to rpc_test_and_set_running(task) and
rpc_clear_queued(task). If we get the order wrong, then we may end up testing
the RPC_TASK_RUNNING flag after __rpc_execute() has looped and changed the
state of the rpc_task.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@vger.kernel.org
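In short, the patch makes rpc_make_runnable() claim RPC_TASK_RUNNING while the
task is still marked queued. A condensed before/after of that function's opening
lines, paraphrased from the hunk further down (the async/sync wakeup code that
follows it is unchanged and omitted here):

	/* Before: QUEUED is cleared first.  __rpc_execute()'s lockless
	 * RPC_IS_QUEUED() check can then see the task as runnable, loop,
	 * and change tk_runstate before the RUNNING test below runs, so
	 * that test may act on the state of a later iteration.
	 */
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;

	/* After: test-and-set RUNNING while the task is still QUEUED, so
	 * the wakeup decision is made before __rpc_execute() can observe
	 * the cleared QUEUED bit and race ahead.
	 */
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;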
Showing 1 changed file with 7 additions and 1 deletion (inline diff)
net/sunrpc/sched.c
1 | /* | 1 | /* |
2 | * linux/net/sunrpc/sched.c | 2 | * linux/net/sunrpc/sched.c |
3 | * | 3 | * |
4 | * Scheduling for synchronous and asynchronous RPC requests. | 4 | * Scheduling for synchronous and asynchronous RPC requests. |
5 | * | 5 | * |
6 | * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de> | 6 | * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de> |
7 | * | 7 | * |
8 | * TCP NFS related read + write fixes | 8 | * TCP NFS related read + write fixes |
9 | * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> | 9 | * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | 13 | ||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/mempool.h> | 17 | #include <linux/mempool.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/spinlock.h> | 19 | #include <linux/spinlock.h> |
20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
21 | #include <linux/freezer.h> | 21 | #include <linux/freezer.h> |
22 | 22 | ||
23 | #include <linux/sunrpc/clnt.h> | 23 | #include <linux/sunrpc/clnt.h> |
24 | 24 | ||
25 | #include "sunrpc.h" | 25 | #include "sunrpc.h" |
26 | 26 | ||
27 | #ifdef RPC_DEBUG | 27 | #ifdef RPC_DEBUG |
28 | #define RPCDBG_FACILITY RPCDBG_SCHED | 28 | #define RPCDBG_FACILITY RPCDBG_SCHED |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | #define CREATE_TRACE_POINTS | 31 | #define CREATE_TRACE_POINTS |
32 | #include <trace/events/sunrpc.h> | 32 | #include <trace/events/sunrpc.h> |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * RPC slabs and memory pools | 35 | * RPC slabs and memory pools |
36 | */ | 36 | */ |
37 | #define RPC_BUFFER_MAXSIZE (2048) | 37 | #define RPC_BUFFER_MAXSIZE (2048) |
38 | #define RPC_BUFFER_POOLSIZE (8) | 38 | #define RPC_BUFFER_POOLSIZE (8) |
39 | #define RPC_TASK_POOLSIZE (8) | 39 | #define RPC_TASK_POOLSIZE (8) |
40 | static struct kmem_cache *rpc_task_slabp __read_mostly; | 40 | static struct kmem_cache *rpc_task_slabp __read_mostly; |
41 | static struct kmem_cache *rpc_buffer_slabp __read_mostly; | 41 | static struct kmem_cache *rpc_buffer_slabp __read_mostly; |
42 | static mempool_t *rpc_task_mempool __read_mostly; | 42 | static mempool_t *rpc_task_mempool __read_mostly; |
43 | static mempool_t *rpc_buffer_mempool __read_mostly; | 43 | static mempool_t *rpc_buffer_mempool __read_mostly; |
44 | 44 | ||
45 | static void rpc_async_schedule(struct work_struct *); | 45 | static void rpc_async_schedule(struct work_struct *); |
46 | static void rpc_release_task(struct rpc_task *task); | 46 | static void rpc_release_task(struct rpc_task *task); |
47 | static void __rpc_queue_timer_fn(unsigned long ptr); | 47 | static void __rpc_queue_timer_fn(unsigned long ptr); |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * RPC tasks sit here while waiting for conditions to improve. | 50 | * RPC tasks sit here while waiting for conditions to improve. |
51 | */ | 51 | */ |
52 | static struct rpc_wait_queue delay_queue; | 52 | static struct rpc_wait_queue delay_queue; |
53 | 53 | ||
54 | /* | 54 | /* |
55 | * rpciod-related stuff | 55 | * rpciod-related stuff |
56 | */ | 56 | */ |
57 | struct workqueue_struct *rpciod_workqueue; | 57 | struct workqueue_struct *rpciod_workqueue; |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * Disable the timer for a given RPC task. Should be called with | 60 | * Disable the timer for a given RPC task. Should be called with |
61 | * queue->lock and bh_disabled in order to avoid races within | 61 | * queue->lock and bh_disabled in order to avoid races within |
62 | * rpc_run_timer(). | 62 | * rpc_run_timer(). |
63 | */ | 63 | */ |
64 | static void | 64 | static void |
65 | __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) | 65 | __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) |
66 | { | 66 | { |
67 | if (task->tk_timeout == 0) | 67 | if (task->tk_timeout == 0) |
68 | return; | 68 | return; |
69 | dprintk("RPC: %5u disabling timer\n", task->tk_pid); | 69 | dprintk("RPC: %5u disabling timer\n", task->tk_pid); |
70 | task->tk_timeout = 0; | 70 | task->tk_timeout = 0; |
71 | list_del(&task->u.tk_wait.timer_list); | 71 | list_del(&task->u.tk_wait.timer_list); |
72 | if (list_empty(&queue->timer_list.list)) | 72 | if (list_empty(&queue->timer_list.list)) |
73 | del_timer(&queue->timer_list.timer); | 73 | del_timer(&queue->timer_list.timer); |
74 | } | 74 | } |
75 | 75 | ||
76 | static void | 76 | static void |
77 | rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) | 77 | rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) |
78 | { | 78 | { |
79 | queue->timer_list.expires = expires; | 79 | queue->timer_list.expires = expires; |
80 | mod_timer(&queue->timer_list.timer, expires); | 80 | mod_timer(&queue->timer_list.timer, expires); |
81 | } | 81 | } |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * Set up a timer for the current task. | 84 | * Set up a timer for the current task. |
85 | */ | 85 | */ |
86 | static void | 86 | static void |
87 | __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) | 87 | __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) |
88 | { | 88 | { |
89 | if (!task->tk_timeout) | 89 | if (!task->tk_timeout) |
90 | return; | 90 | return; |
91 | 91 | ||
92 | dprintk("RPC: %5u setting alarm for %lu ms\n", | 92 | dprintk("RPC: %5u setting alarm for %lu ms\n", |
93 | task->tk_pid, task->tk_timeout * 1000 / HZ); | 93 | task->tk_pid, task->tk_timeout * 1000 / HZ); |
94 | 94 | ||
95 | task->u.tk_wait.expires = jiffies + task->tk_timeout; | 95 | task->u.tk_wait.expires = jiffies + task->tk_timeout; |
96 | if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires)) | 96 | if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires)) |
97 | rpc_set_queue_timer(queue, task->u.tk_wait.expires); | 97 | rpc_set_queue_timer(queue, task->u.tk_wait.expires); |
98 | list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); | 98 | list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); |
99 | } | 99 | } |
100 | 100 | ||
101 | static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue) | 101 | static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue) |
102 | { | 102 | { |
103 | struct list_head *q = &queue->tasks[queue->priority]; | 103 | struct list_head *q = &queue->tasks[queue->priority]; |
104 | struct rpc_task *task; | 104 | struct rpc_task *task; |
105 | 105 | ||
106 | if (!list_empty(q)) { | 106 | if (!list_empty(q)) { |
107 | task = list_first_entry(q, struct rpc_task, u.tk_wait.list); | 107 | task = list_first_entry(q, struct rpc_task, u.tk_wait.list); |
108 | if (task->tk_owner == queue->owner) | 108 | if (task->tk_owner == queue->owner) |
109 | list_move_tail(&task->u.tk_wait.list, q); | 109 | list_move_tail(&task->u.tk_wait.list, q); |
110 | } | 110 | } |
111 | } | 111 | } |
112 | 112 | ||
113 | static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) | 113 | static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) |
114 | { | 114 | { |
115 | if (queue->priority != priority) { | 115 | if (queue->priority != priority) { |
116 | /* Fairness: rotate the list when changing priority */ | 116 | /* Fairness: rotate the list when changing priority */ |
117 | rpc_rotate_queue_owner(queue); | 117 | rpc_rotate_queue_owner(queue); |
118 | queue->priority = priority; | 118 | queue->priority = priority; |
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) | 122 | static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) |
123 | { | 123 | { |
124 | queue->owner = pid; | 124 | queue->owner = pid; |
125 | queue->nr = RPC_BATCH_COUNT; | 125 | queue->nr = RPC_BATCH_COUNT; |
126 | } | 126 | } |
127 | 127 | ||
128 | static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) | 128 | static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) |
129 | { | 129 | { |
130 | rpc_set_waitqueue_priority(queue, queue->maxpriority); | 130 | rpc_set_waitqueue_priority(queue, queue->maxpriority); |
131 | rpc_set_waitqueue_owner(queue, 0); | 131 | rpc_set_waitqueue_owner(queue, 0); |
132 | } | 132 | } |
133 | 133 | ||
134 | /* | 134 | /* |
135 | * Add new request to a priority queue. | 135 | * Add new request to a priority queue. |
136 | */ | 136 | */ |
137 | static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, | 137 | static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, |
138 | struct rpc_task *task, | 138 | struct rpc_task *task, |
139 | unsigned char queue_priority) | 139 | unsigned char queue_priority) |
140 | { | 140 | { |
141 | struct list_head *q; | 141 | struct list_head *q; |
142 | struct rpc_task *t; | 142 | struct rpc_task *t; |
143 | 143 | ||
144 | INIT_LIST_HEAD(&task->u.tk_wait.links); | 144 | INIT_LIST_HEAD(&task->u.tk_wait.links); |
145 | if (unlikely(queue_priority > queue->maxpriority)) | 145 | if (unlikely(queue_priority > queue->maxpriority)) |
146 | queue_priority = queue->maxpriority; | 146 | queue_priority = queue->maxpriority; |
147 | if (queue_priority > queue->priority) | 147 | if (queue_priority > queue->priority) |
148 | rpc_set_waitqueue_priority(queue, queue_priority); | 148 | rpc_set_waitqueue_priority(queue, queue_priority); |
149 | q = &queue->tasks[queue_priority]; | 149 | q = &queue->tasks[queue_priority]; |
150 | list_for_each_entry(t, q, u.tk_wait.list) { | 150 | list_for_each_entry(t, q, u.tk_wait.list) { |
151 | if (t->tk_owner == task->tk_owner) { | 151 | if (t->tk_owner == task->tk_owner) { |
152 | list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); | 152 | list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); |
153 | return; | 153 | return; |
154 | } | 154 | } |
155 | } | 155 | } |
156 | list_add_tail(&task->u.tk_wait.list, q); | 156 | list_add_tail(&task->u.tk_wait.list, q); |
157 | } | 157 | } |
158 | 158 | ||
159 | /* | 159 | /* |
160 | * Add new request to wait queue. | 160 | * Add new request to wait queue. |
161 | * | 161 | * |
162 | * Swapper tasks always get inserted at the head of the queue. | 162 | * Swapper tasks always get inserted at the head of the queue. |
163 | * This should avoid many nasty memory deadlocks and hopefully | 163 | * This should avoid many nasty memory deadlocks and hopefully |
164 | * improve overall performance. | 164 | * improve overall performance. |
165 | * Everyone else gets appended to the queue to ensure proper FIFO behavior. | 165 | * Everyone else gets appended to the queue to ensure proper FIFO behavior. |
166 | */ | 166 | */ |
167 | static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, | 167 | static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, |
168 | struct rpc_task *task, | 168 | struct rpc_task *task, |
169 | unsigned char queue_priority) | 169 | unsigned char queue_priority) |
170 | { | 170 | { |
171 | WARN_ON_ONCE(RPC_IS_QUEUED(task)); | 171 | WARN_ON_ONCE(RPC_IS_QUEUED(task)); |
172 | if (RPC_IS_QUEUED(task)) | 172 | if (RPC_IS_QUEUED(task)) |
173 | return; | 173 | return; |
174 | 174 | ||
175 | if (RPC_IS_PRIORITY(queue)) | 175 | if (RPC_IS_PRIORITY(queue)) |
176 | __rpc_add_wait_queue_priority(queue, task, queue_priority); | 176 | __rpc_add_wait_queue_priority(queue, task, queue_priority); |
177 | else if (RPC_IS_SWAPPER(task)) | 177 | else if (RPC_IS_SWAPPER(task)) |
178 | list_add(&task->u.tk_wait.list, &queue->tasks[0]); | 178 | list_add(&task->u.tk_wait.list, &queue->tasks[0]); |
179 | else | 179 | else |
180 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); | 180 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); |
181 | task->tk_waitqueue = queue; | 181 | task->tk_waitqueue = queue; |
182 | queue->qlen++; | 182 | queue->qlen++; |
183 | /* barrier matches the read in rpc_wake_up_task_queue_locked() */ | 183 | /* barrier matches the read in rpc_wake_up_task_queue_locked() */ |
184 | smp_wmb(); | 184 | smp_wmb(); |
185 | rpc_set_queued(task); | 185 | rpc_set_queued(task); |
186 | 186 | ||
187 | dprintk("RPC: %5u added to queue %p \"%s\"\n", | 187 | dprintk("RPC: %5u added to queue %p \"%s\"\n", |
188 | task->tk_pid, queue, rpc_qname(queue)); | 188 | task->tk_pid, queue, rpc_qname(queue)); |
189 | } | 189 | } |
190 | 190 | ||
191 | /* | 191 | /* |
192 | * Remove request from a priority queue. | 192 | * Remove request from a priority queue. |
193 | */ | 193 | */ |
194 | static void __rpc_remove_wait_queue_priority(struct rpc_task *task) | 194 | static void __rpc_remove_wait_queue_priority(struct rpc_task *task) |
195 | { | 195 | { |
196 | struct rpc_task *t; | 196 | struct rpc_task *t; |
197 | 197 | ||
198 | if (!list_empty(&task->u.tk_wait.links)) { | 198 | if (!list_empty(&task->u.tk_wait.links)) { |
199 | t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list); | 199 | t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list); |
200 | list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); | 200 | list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); |
201 | list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); | 201 | list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); |
202 | } | 202 | } |
203 | } | 203 | } |
204 | 204 | ||
205 | /* | 205 | /* |
206 | * Remove request from queue. | 206 | * Remove request from queue. |
207 | * Note: must be called with spin lock held. | 207 | * Note: must be called with spin lock held. |
208 | */ | 208 | */ |
209 | static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) | 209 | static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) |
210 | { | 210 | { |
211 | __rpc_disable_timer(queue, task); | 211 | __rpc_disable_timer(queue, task); |
212 | if (RPC_IS_PRIORITY(queue)) | 212 | if (RPC_IS_PRIORITY(queue)) |
213 | __rpc_remove_wait_queue_priority(task); | 213 | __rpc_remove_wait_queue_priority(task); |
214 | list_del(&task->u.tk_wait.list); | 214 | list_del(&task->u.tk_wait.list); |
215 | queue->qlen--; | 215 | queue->qlen--; |
216 | dprintk("RPC: %5u removed from queue %p \"%s\"\n", | 216 | dprintk("RPC: %5u removed from queue %p \"%s\"\n", |
217 | task->tk_pid, queue, rpc_qname(queue)); | 217 | task->tk_pid, queue, rpc_qname(queue)); |
218 | } | 218 | } |
219 | 219 | ||
220 | static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues) | 220 | static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues) |
221 | { | 221 | { |
222 | int i; | 222 | int i; |
223 | 223 | ||
224 | spin_lock_init(&queue->lock); | 224 | spin_lock_init(&queue->lock); |
225 | for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) | 225 | for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) |
226 | INIT_LIST_HEAD(&queue->tasks[i]); | 226 | INIT_LIST_HEAD(&queue->tasks[i]); |
227 | queue->maxpriority = nr_queues - 1; | 227 | queue->maxpriority = nr_queues - 1; |
228 | rpc_reset_waitqueue_priority(queue); | 228 | rpc_reset_waitqueue_priority(queue); |
229 | queue->qlen = 0; | 229 | queue->qlen = 0; |
230 | setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue); | 230 | setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue); |
231 | INIT_LIST_HEAD(&queue->timer_list.list); | 231 | INIT_LIST_HEAD(&queue->timer_list.list); |
232 | rpc_assign_waitqueue_name(queue, qname); | 232 | rpc_assign_waitqueue_name(queue, qname); |
233 | } | 233 | } |
234 | 234 | ||
235 | void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname) | 235 | void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname) |
236 | { | 236 | { |
237 | __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); | 237 | __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); |
238 | } | 238 | } |
239 | EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue); | 239 | EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue); |
240 | 240 | ||
241 | void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) | 241 | void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) |
242 | { | 242 | { |
243 | __rpc_init_priority_wait_queue(queue, qname, 1); | 243 | __rpc_init_priority_wait_queue(queue, qname, 1); |
244 | } | 244 | } |
245 | EXPORT_SYMBOL_GPL(rpc_init_wait_queue); | 245 | EXPORT_SYMBOL_GPL(rpc_init_wait_queue); |
246 | 246 | ||
247 | void rpc_destroy_wait_queue(struct rpc_wait_queue *queue) | 247 | void rpc_destroy_wait_queue(struct rpc_wait_queue *queue) |
248 | { | 248 | { |
249 | del_timer_sync(&queue->timer_list.timer); | 249 | del_timer_sync(&queue->timer_list.timer); |
250 | } | 250 | } |
251 | EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); | 251 | EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); |
252 | 252 | ||
253 | static int rpc_wait_bit_killable(void *word) | 253 | static int rpc_wait_bit_killable(void *word) |
254 | { | 254 | { |
255 | if (fatal_signal_pending(current)) | 255 | if (fatal_signal_pending(current)) |
256 | return -ERESTARTSYS; | 256 | return -ERESTARTSYS; |
257 | freezable_schedule(); | 257 | freezable_schedule(); |
258 | return 0; | 258 | return 0; |
259 | } | 259 | } |
260 | 260 | ||
261 | #ifdef RPC_DEBUG | 261 | #ifdef RPC_DEBUG |
262 | static void rpc_task_set_debuginfo(struct rpc_task *task) | 262 | static void rpc_task_set_debuginfo(struct rpc_task *task) |
263 | { | 263 | { |
264 | static atomic_t rpc_pid; | 264 | static atomic_t rpc_pid; |
265 | 265 | ||
266 | task->tk_pid = atomic_inc_return(&rpc_pid); | 266 | task->tk_pid = atomic_inc_return(&rpc_pid); |
267 | } | 267 | } |
268 | #else | 268 | #else |
269 | static inline void rpc_task_set_debuginfo(struct rpc_task *task) | 269 | static inline void rpc_task_set_debuginfo(struct rpc_task *task) |
270 | { | 270 | { |
271 | } | 271 | } |
272 | #endif | 272 | #endif |
273 | 273 | ||
274 | static void rpc_set_active(struct rpc_task *task) | 274 | static void rpc_set_active(struct rpc_task *task) |
275 | { | 275 | { |
276 | trace_rpc_task_begin(task->tk_client, task, NULL); | 276 | trace_rpc_task_begin(task->tk_client, task, NULL); |
277 | 277 | ||
278 | rpc_task_set_debuginfo(task); | 278 | rpc_task_set_debuginfo(task); |
279 | set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | 279 | set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
280 | } | 280 | } |
281 | 281 | ||
282 | /* | 282 | /* |
283 | * Mark an RPC call as having completed by clearing the 'active' bit | 283 | * Mark an RPC call as having completed by clearing the 'active' bit |
284 | * and then waking up all tasks that were sleeping. | 284 | * and then waking up all tasks that were sleeping. |
285 | */ | 285 | */ |
286 | static int rpc_complete_task(struct rpc_task *task) | 286 | static int rpc_complete_task(struct rpc_task *task) |
287 | { | 287 | { |
288 | void *m = &task->tk_runstate; | 288 | void *m = &task->tk_runstate; |
289 | wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); | 289 | wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); |
290 | struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE); | 290 | struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE); |
291 | unsigned long flags; | 291 | unsigned long flags; |
292 | int ret; | 292 | int ret; |
293 | 293 | ||
294 | trace_rpc_task_complete(task->tk_client, task, NULL); | 294 | trace_rpc_task_complete(task->tk_client, task, NULL); |
295 | 295 | ||
296 | spin_lock_irqsave(&wq->lock, flags); | 296 | spin_lock_irqsave(&wq->lock, flags); |
297 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | 297 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
298 | ret = atomic_dec_and_test(&task->tk_count); | 298 | ret = atomic_dec_and_test(&task->tk_count); |
299 | if (waitqueue_active(wq)) | 299 | if (waitqueue_active(wq)) |
300 | __wake_up_locked_key(wq, TASK_NORMAL, &k); | 300 | __wake_up_locked_key(wq, TASK_NORMAL, &k); |
301 | spin_unlock_irqrestore(&wq->lock, flags); | 301 | spin_unlock_irqrestore(&wq->lock, flags); |
302 | return ret; | 302 | return ret; |
303 | } | 303 | } |
304 | 304 | ||
305 | /* | 305 | /* |
306 | * Allow callers to wait for completion of an RPC call | 306 | * Allow callers to wait for completion of an RPC call |
307 | * | 307 | * |
308 | * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit() | 308 | * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit() |
309 | * to enforce taking of the wq->lock and hence avoid races with | 309 | * to enforce taking of the wq->lock and hence avoid races with |
310 | * rpc_complete_task(). | 310 | * rpc_complete_task(). |
311 | */ | 311 | */ |
312 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) | 312 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) |
313 | { | 313 | { |
314 | if (action == NULL) | 314 | if (action == NULL) |
315 | action = rpc_wait_bit_killable; | 315 | action = rpc_wait_bit_killable; |
316 | return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, | 316 | return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, |
317 | action, TASK_KILLABLE); | 317 | action, TASK_KILLABLE); |
318 | } | 318 | } |
319 | EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); | 319 | EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * Make an RPC task runnable. | 322 | * Make an RPC task runnable. |
323 | * | 323 | * |
324 | * Note: If the task is ASYNC, and is being made runnable after sitting on an | 324 | * Note: If the task is ASYNC, and is being made runnable after sitting on an |
325 | * rpc_wait_queue, this must be called with the queue spinlock held to protect | 325 | * rpc_wait_queue, this must be called with the queue spinlock held to protect |
326 | * the wait queue operation. | 326 | * the wait queue operation. |
| | 327 | * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(), |
| | 328 | * which is needed to ensure that __rpc_execute() doesn't loop (due to the |
| | 329 | * lockless RPC_IS_QUEUED() test) before we've had a chance to test |
| | 330 | * the RPC_TASK_RUNNING flag. |
327 | */ | 331 | */ |
328 | static void rpc_make_runnable(struct rpc_task *task) | 332 | static void rpc_make_runnable(struct rpc_task *task) |
329 | { | 333 | { |
| | 334 | bool need_wakeup = !rpc_test_and_set_running(task); |
| | 335 | |
330 | rpc_clear_queued(task); | 336 | rpc_clear_queued(task); |
331 | if (rpc_test_and_set_running(task)) | 337 | if (!need_wakeup) |
332 | return; | 338 | return; |
333 | if (RPC_IS_ASYNC(task)) { | 339 | if (RPC_IS_ASYNC(task)) { |
334 | INIT_WORK(&task->u.tk_work, rpc_async_schedule); | 340 | INIT_WORK(&task->u.tk_work, rpc_async_schedule); |
335 | queue_work(rpciod_workqueue, &task->u.tk_work); | 341 | queue_work(rpciod_workqueue, &task->u.tk_work); |
336 | } else | 342 | } else |
337 | wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); | 343 | wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); |
338 | } | 344 | } |
339 | 345 | ||
340 | /* | 346 | /* |
341 | * Prepare for sleeping on a wait queue. | 347 | * Prepare for sleeping on a wait queue. |
342 | * By always appending tasks to the list we ensure FIFO behavior. | 348 | * By always appending tasks to the list we ensure FIFO behavior. |
343 | * NB: An RPC task will only receive interrupt-driven events as long | 349 | * NB: An RPC task will only receive interrupt-driven events as long |
344 | * as it's on a wait queue. | 350 | * as it's on a wait queue. |
345 | */ | 351 | */ |
346 | static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, | 352 | static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, |
347 | struct rpc_task *task, | 353 | struct rpc_task *task, |
348 | rpc_action action, | 354 | rpc_action action, |
349 | unsigned char queue_priority) | 355 | unsigned char queue_priority) |
350 | { | 356 | { |
351 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", | 357 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", |
352 | task->tk_pid, rpc_qname(q), jiffies); | 358 | task->tk_pid, rpc_qname(q), jiffies); |
353 | 359 | ||
354 | trace_rpc_task_sleep(task->tk_client, task, q); | 360 | trace_rpc_task_sleep(task->tk_client, task, q); |
355 | 361 | ||
356 | __rpc_add_wait_queue(q, task, queue_priority); | 362 | __rpc_add_wait_queue(q, task, queue_priority); |
357 | 363 | ||
358 | WARN_ON_ONCE(task->tk_callback != NULL); | 364 | WARN_ON_ONCE(task->tk_callback != NULL); |
359 | task->tk_callback = action; | 365 | task->tk_callback = action; |
360 | __rpc_add_timer(q, task); | 366 | __rpc_add_timer(q, task); |
361 | } | 367 | } |
362 | 368 | ||
363 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | 369 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, |
364 | rpc_action action) | 370 | rpc_action action) |
365 | { | 371 | { |
366 | /* We shouldn't ever put an inactive task to sleep */ | 372 | /* We shouldn't ever put an inactive task to sleep */ |
367 | WARN_ON_ONCE(!RPC_IS_ACTIVATED(task)); | 373 | WARN_ON_ONCE(!RPC_IS_ACTIVATED(task)); |
368 | if (!RPC_IS_ACTIVATED(task)) { | 374 | if (!RPC_IS_ACTIVATED(task)) { |
369 | task->tk_status = -EIO; | 375 | task->tk_status = -EIO; |
370 | rpc_put_task_async(task); | 376 | rpc_put_task_async(task); |
371 | return; | 377 | return; |
372 | } | 378 | } |
373 | 379 | ||
374 | /* | 380 | /* |
375 | * Protect the queue operations. | 381 | * Protect the queue operations. |
376 | */ | 382 | */ |
377 | spin_lock_bh(&q->lock); | 383 | spin_lock_bh(&q->lock); |
378 | __rpc_sleep_on_priority(q, task, action, task->tk_priority); | 384 | __rpc_sleep_on_priority(q, task, action, task->tk_priority); |
379 | spin_unlock_bh(&q->lock); | 385 | spin_unlock_bh(&q->lock); |
380 | } | 386 | } |
381 | EXPORT_SYMBOL_GPL(rpc_sleep_on); | 387 | EXPORT_SYMBOL_GPL(rpc_sleep_on); |
382 | 388 | ||
383 | void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, | 389 | void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, |
384 | rpc_action action, int priority) | 390 | rpc_action action, int priority) |
385 | { | 391 | { |
386 | /* We shouldn't ever put an inactive task to sleep */ | 392 | /* We shouldn't ever put an inactive task to sleep */ |
387 | WARN_ON_ONCE(!RPC_IS_ACTIVATED(task)); | 393 | WARN_ON_ONCE(!RPC_IS_ACTIVATED(task)); |
388 | if (!RPC_IS_ACTIVATED(task)) { | 394 | if (!RPC_IS_ACTIVATED(task)) { |
389 | task->tk_status = -EIO; | 395 | task->tk_status = -EIO; |
390 | rpc_put_task_async(task); | 396 | rpc_put_task_async(task); |
391 | return; | 397 | return; |
392 | } | 398 | } |
393 | 399 | ||
394 | /* | 400 | /* |
395 | * Protect the queue operations. | 401 | * Protect the queue operations. |
396 | */ | 402 | */ |
397 | spin_lock_bh(&q->lock); | 403 | spin_lock_bh(&q->lock); |
398 | __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW); | 404 | __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW); |
399 | spin_unlock_bh(&q->lock); | 405 | spin_unlock_bh(&q->lock); |
400 | } | 406 | } |
401 | EXPORT_SYMBOL_GPL(rpc_sleep_on_priority); | 407 | EXPORT_SYMBOL_GPL(rpc_sleep_on_priority); |
402 | 408 | ||
403 | /** | 409 | /** |
404 | * __rpc_do_wake_up_task - wake up a single rpc_task | 410 | * __rpc_do_wake_up_task - wake up a single rpc_task |
405 | * @queue: wait queue | 411 | * @queue: wait queue |
406 | * @task: task to be woken up | 412 | * @task: task to be woken up |
407 | * | 413 | * |
408 | * Caller must hold queue->lock, and have cleared the task queued flag. | 414 | * Caller must hold queue->lock, and have cleared the task queued flag. |
409 | */ | 415 | */ |
410 | static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task) | 416 | static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task) |
411 | { | 417 | { |
412 | dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", | 418 | dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", |
413 | task->tk_pid, jiffies); | 419 | task->tk_pid, jiffies); |
414 | 420 | ||
415 | /* Has the task been executed yet? If not, we cannot wake it up! */ | 421 | /* Has the task been executed yet? If not, we cannot wake it up! */ |
416 | if (!RPC_IS_ACTIVATED(task)) { | 422 | if (!RPC_IS_ACTIVATED(task)) { |
417 | printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); | 423 | printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); |
418 | return; | 424 | return; |
419 | } | 425 | } |
420 | 426 | ||
421 | trace_rpc_task_wakeup(task->tk_client, task, queue); | 427 | trace_rpc_task_wakeup(task->tk_client, task, queue); |
422 | 428 | ||
423 | __rpc_remove_wait_queue(queue, task); | 429 | __rpc_remove_wait_queue(queue, task); |
424 | 430 | ||
425 | rpc_make_runnable(task); | 431 | rpc_make_runnable(task); |
426 | 432 | ||
427 | dprintk("RPC: __rpc_wake_up_task done\n"); | 433 | dprintk("RPC: __rpc_wake_up_task done\n"); |
428 | } | 434 | } |
429 | 435 | ||
430 | /* | 436 | /* |
431 | * Wake up a queued task while the queue lock is being held | 437 | * Wake up a queued task while the queue lock is being held |
432 | */ | 438 | */ |
433 | static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) | 439 | static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) |
434 | { | 440 | { |
435 | if (RPC_IS_QUEUED(task)) { | 441 | if (RPC_IS_QUEUED(task)) { |
436 | smp_rmb(); | 442 | smp_rmb(); |
437 | if (task->tk_waitqueue == queue) | 443 | if (task->tk_waitqueue == queue) |
438 | __rpc_do_wake_up_task(queue, task); | 444 | __rpc_do_wake_up_task(queue, task); |
439 | } | 445 | } |
440 | } | 446 | } |
441 | 447 | ||
442 | /* | 448 | /* |
443 | * Tests whether rpc queue is empty | 449 | * Tests whether rpc queue is empty |
444 | */ | 450 | */ |
445 | int rpc_queue_empty(struct rpc_wait_queue *queue) | 451 | int rpc_queue_empty(struct rpc_wait_queue *queue) |
446 | { | 452 | { |
447 | int res; | 453 | int res; |
448 | 454 | ||
449 | spin_lock_bh(&queue->lock); | 455 | spin_lock_bh(&queue->lock); |
450 | res = queue->qlen; | 456 | res = queue->qlen; |
451 | spin_unlock_bh(&queue->lock); | 457 | spin_unlock_bh(&queue->lock); |
452 | return res == 0; | 458 | return res == 0; |
453 | } | 459 | } |
454 | EXPORT_SYMBOL_GPL(rpc_queue_empty); | 460 | EXPORT_SYMBOL_GPL(rpc_queue_empty); |
455 | 461 | ||
456 | /* | 462 | /* |
457 | * Wake up a task on a specific queue | 463 | * Wake up a task on a specific queue |
458 | */ | 464 | */ |
459 | void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) | 465 | void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) |
460 | { | 466 | { |
461 | spin_lock_bh(&queue->lock); | 467 | spin_lock_bh(&queue->lock); |
462 | rpc_wake_up_task_queue_locked(queue, task); | 468 | rpc_wake_up_task_queue_locked(queue, task); |
463 | spin_unlock_bh(&queue->lock); | 469 | spin_unlock_bh(&queue->lock); |
464 | } | 470 | } |
465 | EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task); | 471 | EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task); |
466 | 472 | ||
467 | /* | 473 | /* |
468 | * Wake up the next task on a priority queue. | 474 | * Wake up the next task on a priority queue. |
469 | */ | 475 | */ |
470 | static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue) | 476 | static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue) |
471 | { | 477 | { |
472 | struct list_head *q; | 478 | struct list_head *q; |
473 | struct rpc_task *task; | 479 | struct rpc_task *task; |
474 | 480 | ||
475 | /* | 481 | /* |
476 | * Service a batch of tasks from a single owner. | 482 | * Service a batch of tasks from a single owner. |
477 | */ | 483 | */ |
478 | q = &queue->tasks[queue->priority]; | 484 | q = &queue->tasks[queue->priority]; |
479 | if (!list_empty(q)) { | 485 | if (!list_empty(q)) { |
480 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); | 486 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); |
481 | if (queue->owner == task->tk_owner) { | 487 | if (queue->owner == task->tk_owner) { |
482 | if (--queue->nr) | 488 | if (--queue->nr) |
483 | goto out; | 489 | goto out; |
484 | list_move_tail(&task->u.tk_wait.list, q); | 490 | list_move_tail(&task->u.tk_wait.list, q); |
485 | } | 491 | } |
486 | /* | 492 | /* |
487 | * Check if we need to switch queues. | 493 | * Check if we need to switch queues. |
488 | */ | 494 | */ |
489 | goto new_owner; | 495 | goto new_owner; |
490 | } | 496 | } |
491 | 497 | ||
492 | /* | 498 | /* |
493 | * Service the next queue. | 499 | * Service the next queue. |
494 | */ | 500 | */ |
495 | do { | 501 | do { |
496 | if (q == &queue->tasks[0]) | 502 | if (q == &queue->tasks[0]) |
497 | q = &queue->tasks[queue->maxpriority]; | 503 | q = &queue->tasks[queue->maxpriority]; |
498 | else | 504 | else |
499 | q = q - 1; | 505 | q = q - 1; |
500 | if (!list_empty(q)) { | 506 | if (!list_empty(q)) { |
501 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); | 507 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); |
502 | goto new_queue; | 508 | goto new_queue; |
503 | } | 509 | } |
504 | } while (q != &queue->tasks[queue->priority]); | 510 | } while (q != &queue->tasks[queue->priority]); |
505 | 511 | ||
506 | rpc_reset_waitqueue_priority(queue); | 512 | rpc_reset_waitqueue_priority(queue); |
507 | return NULL; | 513 | return NULL; |
508 | 514 | ||
509 | new_queue: | 515 | new_queue: |
510 | rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); | 516 | rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); |
511 | new_owner: | 517 | new_owner: |
512 | rpc_set_waitqueue_owner(queue, task->tk_owner); | 518 | rpc_set_waitqueue_owner(queue, task->tk_owner); |
513 | out: | 519 | out: |
514 | return task; | 520 | return task; |
515 | } | 521 | } |
516 | 522 | ||
517 | static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue) | 523 | static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue) |
518 | { | 524 | { |
519 | if (RPC_IS_PRIORITY(queue)) | 525 | if (RPC_IS_PRIORITY(queue)) |
520 | return __rpc_find_next_queued_priority(queue); | 526 | return __rpc_find_next_queued_priority(queue); |
521 | if (!list_empty(&queue->tasks[0])) | 527 | if (!list_empty(&queue->tasks[0])) |
522 | return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list); | 528 | return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list); |
523 | return NULL; | 529 | return NULL; |
524 | } | 530 | } |
525 | 531 | ||
526 | /* | 532 | /* |
527 | * Wake up the first task on the wait queue. | 533 | * Wake up the first task on the wait queue. |
528 | */ | 534 | */ |
529 | struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue, | 535 | struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue, |
530 | bool (*func)(struct rpc_task *, void *), void *data) | 536 | bool (*func)(struct rpc_task *, void *), void *data) |
531 | { | 537 | { |
532 | struct rpc_task *task = NULL; | 538 | struct rpc_task *task = NULL; |
533 | 539 | ||
534 | dprintk("RPC: wake_up_first(%p \"%s\")\n", | 540 | dprintk("RPC: wake_up_first(%p \"%s\")\n", |
535 | queue, rpc_qname(queue)); | 541 | queue, rpc_qname(queue)); |
536 | spin_lock_bh(&queue->lock); | 542 | spin_lock_bh(&queue->lock); |
537 | task = __rpc_find_next_queued(queue); | 543 | task = __rpc_find_next_queued(queue); |
538 | if (task != NULL) { | 544 | if (task != NULL) { |
539 | if (func(task, data)) | 545 | if (func(task, data)) |
540 | rpc_wake_up_task_queue_locked(queue, task); | 546 | rpc_wake_up_task_queue_locked(queue, task); |
541 | else | 547 | else |
542 | task = NULL; | 548 | task = NULL; |
543 | } | 549 | } |
544 | spin_unlock_bh(&queue->lock); | 550 | spin_unlock_bh(&queue->lock); |
545 | 551 | ||
546 | return task; | 552 | return task; |
547 | } | 553 | } |
548 | EXPORT_SYMBOL_GPL(rpc_wake_up_first); | 554 | EXPORT_SYMBOL_GPL(rpc_wake_up_first); |
549 | 555 | ||
550 | static bool rpc_wake_up_next_func(struct rpc_task *task, void *data) | 556 | static bool rpc_wake_up_next_func(struct rpc_task *task, void *data) |
551 | { | 557 | { |
552 | return true; | 558 | return true; |
553 | } | 559 | } |
554 | 560 | ||
555 | /* | 561 | /* |
556 | * Wake up the next task on the wait queue. | 562 | * Wake up the next task on the wait queue. |
557 | */ | 563 | */ |
558 | struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue) | 564 | struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue) |
559 | { | 565 | { |
560 | return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL); | 566 | return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL); |
561 | } | 567 | } |
562 | EXPORT_SYMBOL_GPL(rpc_wake_up_next); | 568 | EXPORT_SYMBOL_GPL(rpc_wake_up_next); |
563 | 569 | ||
564 | /** | 570 | /** |
565 | * rpc_wake_up - wake up all rpc_tasks | 571 | * rpc_wake_up - wake up all rpc_tasks |
566 | * @queue: rpc_wait_queue on which the tasks are sleeping | 572 | * @queue: rpc_wait_queue on which the tasks are sleeping |
567 | * | 573 | * |
568 | * Grabs queue->lock | 574 | * Grabs queue->lock |
569 | */ | 575 | */ |
570 | void rpc_wake_up(struct rpc_wait_queue *queue) | 576 | void rpc_wake_up(struct rpc_wait_queue *queue) |
571 | { | 577 | { |
572 | struct list_head *head; | 578 | struct list_head *head; |
573 | 579 | ||
574 | spin_lock_bh(&queue->lock); | 580 | spin_lock_bh(&queue->lock); |
575 | head = &queue->tasks[queue->maxpriority]; | 581 | head = &queue->tasks[queue->maxpriority]; |
576 | for (;;) { | 582 | for (;;) { |
577 | while (!list_empty(head)) { | 583 | while (!list_empty(head)) { |
578 | struct rpc_task *task; | 584 | struct rpc_task *task; |
579 | task = list_first_entry(head, | 585 | task = list_first_entry(head, |
580 | struct rpc_task, | 586 | struct rpc_task, |
581 | u.tk_wait.list); | 587 | u.tk_wait.list); |
582 | rpc_wake_up_task_queue_locked(queue, task); | 588 | rpc_wake_up_task_queue_locked(queue, task); |
583 | } | 589 | } |
584 | if (head == &queue->tasks[0]) | 590 | if (head == &queue->tasks[0]) |
585 | break; | 591 | break; |
586 | head--; | 592 | head--; |
587 | } | 593 | } |
588 | spin_unlock_bh(&queue->lock); | 594 | spin_unlock_bh(&queue->lock); |
589 | } | 595 | } |
590 | EXPORT_SYMBOL_GPL(rpc_wake_up); | 596 | EXPORT_SYMBOL_GPL(rpc_wake_up); |
591 | 597 | ||
592 | /** | 598 | /** |
593 | * rpc_wake_up_status - wake up all rpc_tasks and set their status value. | 599 | * rpc_wake_up_status - wake up all rpc_tasks and set their status value. |
594 | * @queue: rpc_wait_queue on which the tasks are sleeping | 600 | * @queue: rpc_wait_queue on which the tasks are sleeping |
595 | * @status: status value to set | 601 | * @status: status value to set |
596 | * | 602 | * |
597 | * Grabs queue->lock | 603 | * Grabs queue->lock |
598 | */ | 604 | */ |
599 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) | 605 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) |
600 | { | 606 | { |
601 | struct list_head *head; | 607 | struct list_head *head; |
602 | 608 | ||
603 | spin_lock_bh(&queue->lock); | 609 | spin_lock_bh(&queue->lock); |
604 | head = &queue->tasks[queue->maxpriority]; | 610 | head = &queue->tasks[queue->maxpriority]; |
605 | for (;;) { | 611 | for (;;) { |
606 | while (!list_empty(head)) { | 612 | while (!list_empty(head)) { |
607 | struct rpc_task *task; | 613 | struct rpc_task *task; |
608 | task = list_first_entry(head, | 614 | task = list_first_entry(head, |
609 | struct rpc_task, | 615 | struct rpc_task, |
610 | u.tk_wait.list); | 616 | u.tk_wait.list); |
611 | task->tk_status = status; | 617 | task->tk_status = status; |
612 | rpc_wake_up_task_queue_locked(queue, task); | 618 | rpc_wake_up_task_queue_locked(queue, task); |
613 | } | 619 | } |
614 | if (head == &queue->tasks[0]) | 620 | if (head == &queue->tasks[0]) |
615 | break; | 621 | break; |
616 | head--; | 622 | head--; |
617 | } | 623 | } |
618 | spin_unlock_bh(&queue->lock); | 624 | spin_unlock_bh(&queue->lock); |
619 | } | 625 | } |
620 | EXPORT_SYMBOL_GPL(rpc_wake_up_status); | 626 | EXPORT_SYMBOL_GPL(rpc_wake_up_status); |
621 | 627 | ||
622 | static void __rpc_queue_timer_fn(unsigned long ptr) | 628 | static void __rpc_queue_timer_fn(unsigned long ptr) |
623 | { | 629 | { |
624 | struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr; | 630 | struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr; |
625 | struct rpc_task *task, *n; | 631 | struct rpc_task *task, *n; |
626 | unsigned long expires, now, timeo; | 632 | unsigned long expires, now, timeo; |
627 | 633 | ||
628 | spin_lock(&queue->lock); | 634 | spin_lock(&queue->lock); |
629 | expires = now = jiffies; | 635 | expires = now = jiffies; |
630 | list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { | 636 | list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { |
631 | timeo = task->u.tk_wait.expires; | 637 | timeo = task->u.tk_wait.expires; |
632 | if (time_after_eq(now, timeo)) { | 638 | if (time_after_eq(now, timeo)) { |
633 | dprintk("RPC: %5u timeout\n", task->tk_pid); | 639 | dprintk("RPC: %5u timeout\n", task->tk_pid); |
634 | task->tk_status = -ETIMEDOUT; | 640 | task->tk_status = -ETIMEDOUT; |
635 | rpc_wake_up_task_queue_locked(queue, task); | 641 | rpc_wake_up_task_queue_locked(queue, task); |
636 | continue; | 642 | continue; |
637 | } | 643 | } |
638 | if (expires == now || time_after(expires, timeo)) | 644 | if (expires == now || time_after(expires, timeo)) |
639 | expires = timeo; | 645 | expires = timeo; |
640 | } | 646 | } |
641 | if (!list_empty(&queue->timer_list.list)) | 647 | if (!list_empty(&queue->timer_list.list)) |
642 | rpc_set_queue_timer(queue, expires); | 648 | rpc_set_queue_timer(queue, expires); |
643 | spin_unlock(&queue->lock); | 649 | spin_unlock(&queue->lock); |
644 | } | 650 | } |
645 | 651 | ||
646 | static void __rpc_atrun(struct rpc_task *task) | 652 | static void __rpc_atrun(struct rpc_task *task) |
647 | { | 653 | { |
648 | task->tk_status = 0; | 654 | task->tk_status = 0; |
649 | } | 655 | } |
650 | 656 | ||
651 | /* | 657 | /* |
652 | * Run a task at a later time | 658 | * Run a task at a later time |
653 | */ | 659 | */ |
654 | void rpc_delay(struct rpc_task *task, unsigned long delay) | 660 | void rpc_delay(struct rpc_task *task, unsigned long delay) |
655 | { | 661 | { |
656 | task->tk_timeout = delay; | 662 | task->tk_timeout = delay; |
657 | rpc_sleep_on(&delay_queue, task, __rpc_atrun); | 663 | rpc_sleep_on(&delay_queue, task, __rpc_atrun); |
658 | } | 664 | } |
659 | EXPORT_SYMBOL_GPL(rpc_delay); | 665 | EXPORT_SYMBOL_GPL(rpc_delay); |
660 | 666 | ||
661 | /* | 667 | /* |
662 | * Helper to call task->tk_ops->rpc_call_prepare | 668 | * Helper to call task->tk_ops->rpc_call_prepare |
663 | */ | 669 | */ |
664 | void rpc_prepare_task(struct rpc_task *task) | 670 | void rpc_prepare_task(struct rpc_task *task) |
665 | { | 671 | { |
666 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); | 672 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); |
667 | } | 673 | } |
668 | 674 | ||
669 | static void | 675 | static void |
670 | rpc_init_task_statistics(struct rpc_task *task) | 676 | rpc_init_task_statistics(struct rpc_task *task) |
671 | { | 677 | { |
672 | /* Initialize retry counters */ | 678 | /* Initialize retry counters */ |
673 | task->tk_garb_retry = 2; | 679 | task->tk_garb_retry = 2; |
674 | task->tk_cred_retry = 2; | 680 | task->tk_cred_retry = 2; |
675 | task->tk_rebind_retry = 2; | 681 | task->tk_rebind_retry = 2; |
676 | 682 | ||
677 | /* starting timestamp */ | 683 | /* starting timestamp */ |
678 | task->tk_start = ktime_get(); | 684 | task->tk_start = ktime_get(); |
679 | } | 685 | } |
680 | 686 | ||
681 | static void | 687 | static void |
682 | rpc_reset_task_statistics(struct rpc_task *task) | 688 | rpc_reset_task_statistics(struct rpc_task *task) |
683 | { | 689 | { |
684 | task->tk_timeouts = 0; | 690 | task->tk_timeouts = 0; |
685 | task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT); | 691 | task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT); |
686 | 692 | ||
687 | rpc_init_task_statistics(task); | 693 | rpc_init_task_statistics(task); |
688 | } | 694 | } |
689 | 695 | ||
690 | /* | 696 | /* |
691 | * Helper that calls task->tk_ops->rpc_call_done if it exists | 697 | * Helper that calls task->tk_ops->rpc_call_done if it exists |
692 | */ | 698 | */ |
693 | void rpc_exit_task(struct rpc_task *task) | 699 | void rpc_exit_task(struct rpc_task *task) |
694 | { | 700 | { |
695 | task->tk_action = NULL; | 701 | task->tk_action = NULL; |
696 | if (task->tk_ops->rpc_call_done != NULL) { | 702 | if (task->tk_ops->rpc_call_done != NULL) { |
697 | task->tk_ops->rpc_call_done(task, task->tk_calldata); | 703 | task->tk_ops->rpc_call_done(task, task->tk_calldata); |
698 | if (task->tk_action != NULL) { | 704 | if (task->tk_action != NULL) { |
699 | WARN_ON(RPC_ASSASSINATED(task)); | 705 | WARN_ON(RPC_ASSASSINATED(task)); |
700 | /* Always release the RPC slot and buffer memory */ | 706 | /* Always release the RPC slot and buffer memory */ |
701 | xprt_release(task); | 707 | xprt_release(task); |
702 | rpc_reset_task_statistics(task); | 708 | rpc_reset_task_statistics(task); |
703 | } | 709 | } |
704 | } | 710 | } |
705 | } | 711 | } |
706 | 712 | ||
707 | void rpc_exit(struct rpc_task *task, int status) | 713 | void rpc_exit(struct rpc_task *task, int status) |
708 | { | 714 | { |
709 | task->tk_status = status; | 715 | task->tk_status = status; |
710 | task->tk_action = rpc_exit_task; | 716 | task->tk_action = rpc_exit_task; |
711 | if (RPC_IS_QUEUED(task)) | 717 | if (RPC_IS_QUEUED(task)) |
712 | rpc_wake_up_queued_task(task->tk_waitqueue, task); | 718 | rpc_wake_up_queued_task(task->tk_waitqueue, task); |
713 | } | 719 | } |
714 | EXPORT_SYMBOL_GPL(rpc_exit); | 720 | EXPORT_SYMBOL_GPL(rpc_exit); |
715 | 721 | ||
716 | void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) | 722 | void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) |
717 | { | 723 | { |
718 | if (ops->rpc_release != NULL) | 724 | if (ops->rpc_release != NULL) |
719 | ops->rpc_release(calldata); | 725 | ops->rpc_release(calldata); |
720 | } | 726 | } |
721 | 727 | ||
722 | /* | 728 | /* |
723 | * This is the RPC `scheduler' (or rather, the finite state machine). | 729 | * This is the RPC `scheduler' (or rather, the finite state machine). |
724 | */ | 730 | */ |
725 | static void __rpc_execute(struct rpc_task *task) | 731 | static void __rpc_execute(struct rpc_task *task) |
726 | { | 732 | { |
727 | struct rpc_wait_queue *queue; | 733 | struct rpc_wait_queue *queue; |
728 | int task_is_async = RPC_IS_ASYNC(task); | 734 | int task_is_async = RPC_IS_ASYNC(task); |
729 | int status = 0; | 735 | int status = 0; |
730 | 736 | ||
731 | dprintk("RPC: %5u __rpc_execute flags=0x%x\n", | 737 | dprintk("RPC: %5u __rpc_execute flags=0x%x\n", |
732 | task->tk_pid, task->tk_flags); | 738 | task->tk_pid, task->tk_flags); |
733 | 739 | ||
734 | WARN_ON_ONCE(RPC_IS_QUEUED(task)); | 740 | WARN_ON_ONCE(RPC_IS_QUEUED(task)); |
735 | if (RPC_IS_QUEUED(task)) | 741 | if (RPC_IS_QUEUED(task)) |
736 | return; | 742 | return; |
737 | 743 | ||
738 | for (;;) { | 744 | for (;;) { |
739 | void (*do_action)(struct rpc_task *); | 745 | void (*do_action)(struct rpc_task *); |
740 | 746 | ||
741 | /* | 747 | /* |
742 | * Execute any pending callback first. | 748 | * Execute any pending callback first. |
743 | */ | 749 | */ |
744 | do_action = task->tk_callback; | 750 | do_action = task->tk_callback; |
745 | task->tk_callback = NULL; | 751 | task->tk_callback = NULL; |
746 | if (do_action == NULL) { | 752 | if (do_action == NULL) { |
747 | /* | 753 | /* |
748 | * Perform the next FSM step. | 754 | * Perform the next FSM step. |
749 | * tk_action may be NULL if the task has been killed. | 755 | * tk_action may be NULL if the task has been killed. |
750 | * In particular, note that rpc_killall_tasks may | 756 | * In particular, note that rpc_killall_tasks may |
751 | * do this at any time, so beware when dereferencing. | 757 | * do this at any time, so beware when dereferencing. |
752 | */ | 758 | */ |
753 | do_action = task->tk_action; | 759 | do_action = task->tk_action; |
754 | if (do_action == NULL) | 760 | if (do_action == NULL) |
755 | break; | 761 | break; |
756 | } | 762 | } |
757 | trace_rpc_task_run_action(task->tk_client, task, task->tk_action); | 763 | trace_rpc_task_run_action(task->tk_client, task, task->tk_action); |
758 | do_action(task); | 764 | do_action(task); |
759 | 765 | ||
760 | /* | 766 | /* |
761 | * Lockless check for whether task is sleeping or not. | 767 | * Lockless check for whether task is sleeping or not. |
762 | */ | 768 | */ |
763 | if (!RPC_IS_QUEUED(task)) | 769 | if (!RPC_IS_QUEUED(task)) |
764 | continue; | 770 | continue; |
765 | /* | 771 | /* |
766 | * The queue->lock protects against races with | 772 | * The queue->lock protects against races with |
767 | * rpc_make_runnable(). | 773 | * rpc_make_runnable(). |
768 | * | 774 | * |
769 | * Note that once we clear RPC_TASK_RUNNING on an asynchronous | 775 | * Note that once we clear RPC_TASK_RUNNING on an asynchronous |
770 | * rpc_task, rpc_make_runnable() can assign it to a | 776 | * rpc_task, rpc_make_runnable() can assign it to a |
771 | * different workqueue. We therefore cannot assume that the | 777 | * different workqueue. We therefore cannot assume that the |
772 | * rpc_task pointer may still be dereferenced. | 778 | * rpc_task pointer may still be dereferenced. |
773 | */ | 779 | */ |
774 | queue = task->tk_waitqueue; | 780 | queue = task->tk_waitqueue; |
775 | spin_lock_bh(&queue->lock); | 781 | spin_lock_bh(&queue->lock); |
776 | if (!RPC_IS_QUEUED(task)) { | 782 | if (!RPC_IS_QUEUED(task)) { |
777 | spin_unlock_bh(&queue->lock); | 783 | spin_unlock_bh(&queue->lock); |
778 | continue; | 784 | continue; |
779 | } | 785 | } |
780 | rpc_clear_running(task); | 786 | rpc_clear_running(task); |
781 | spin_unlock_bh(&queue->lock); | 787 | spin_unlock_bh(&queue->lock); |
782 | if (task_is_async) | 788 | if (task_is_async) |
783 | return; | 789 | return; |
784 | 790 | ||
785 | /* sync task: sleep here */ | 791 | /* sync task: sleep here */ |
786 | dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); | 792 | dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); |
787 | status = out_of_line_wait_on_bit(&task->tk_runstate, | 793 | status = out_of_line_wait_on_bit(&task->tk_runstate, |
788 | RPC_TASK_QUEUED, rpc_wait_bit_killable, | 794 | RPC_TASK_QUEUED, rpc_wait_bit_killable, |
789 | TASK_KILLABLE); | 795 | TASK_KILLABLE); |
790 | if (status == -ERESTARTSYS) { | 796 | if (status == -ERESTARTSYS) { |
791 | /* | 797 | /* |
792 | * When a sync task receives a signal, it exits with | 798 | * When a sync task receives a signal, it exits with |
793 | * -ERESTARTSYS. In order to catch any callbacks that | 799 | * -ERESTARTSYS. In order to catch any callbacks that |
794 | * clean up after sleeping on some queue, we don't | 800 | * clean up after sleeping on some queue, we don't |
795 | * break the loop here, but go around once more. | 801 | * break the loop here, but go around once more. |
796 | */ | 802 | */ |
797 | dprintk("RPC: %5u got signal\n", task->tk_pid); | 803 | dprintk("RPC: %5u got signal\n", task->tk_pid); |
798 | task->tk_flags |= RPC_TASK_KILLED; | 804 | task->tk_flags |= RPC_TASK_KILLED; |
799 | rpc_exit(task, -ERESTARTSYS); | 805 | rpc_exit(task, -ERESTARTSYS); |
800 | } | 806 | } |
801 | rpc_set_running(task); | 807 | rpc_set_running(task); |
802 | dprintk("RPC: %5u sync task resuming\n", task->tk_pid); | 808 | dprintk("RPC: %5u sync task resuming\n", task->tk_pid); |
803 | } | 809 | } |
804 | 810 | ||
805 | dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, | 811 | dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, |
806 | task->tk_status); | 812 | task->tk_status); |
807 | /* Release all resources associated with the task */ | 813 | /* Release all resources associated with the task */ |
808 | rpc_release_task(task); | 814 | rpc_release_task(task); |
809 | } | 815 | } |
810 | 816 | ||
811 | /* | 817 | /* |
812 | * User-visible entry point to the scheduler. | 818 | * User-visible entry point to the scheduler. |
813 | * | 819 | * |
814 | * This may be called recursively if e.g. an async NFS task updates | 820 | * This may be called recursively if e.g. an async NFS task updates |
815 | * the attributes and finds that dirty pages must be flushed. | 821 | * the attributes and finds that dirty pages must be flushed. |
816 | * NOTE: Upon exit of this function the task is guaranteed to be | 822 | * NOTE: Upon exit of this function the task is guaranteed to be |
817 | * released. In particular note that tk_release() will have | 823 | * released. In particular note that tk_release() will have |
818 | * been called, so your task memory may have been freed. | 824 | * been called, so your task memory may have been freed. |
819 | */ | 825 | */ |
820 | void rpc_execute(struct rpc_task *task) | 826 | void rpc_execute(struct rpc_task *task) |
821 | { | 827 | { |
822 | rpc_set_active(task); | 828 | rpc_set_active(task); |
823 | rpc_make_runnable(task); | 829 | rpc_make_runnable(task); |
824 | if (!RPC_IS_ASYNC(task)) | 830 | if (!RPC_IS_ASYNC(task)) |
825 | __rpc_execute(task); | 831 | __rpc_execute(task); |
826 | } | 832 | } |
827 | 833 | ||
828 | static void rpc_async_schedule(struct work_struct *work) | 834 | static void rpc_async_schedule(struct work_struct *work) |
829 | { | 835 | { |
830 | current->flags |= PF_FSTRANS; | 836 | current->flags |= PF_FSTRANS; |
831 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); | 837 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); |
832 | current->flags &= ~PF_FSTRANS; | 838 | current->flags &= ~PF_FSTRANS; |
833 | } | 839 | } |
834 | 840 | ||
835 | /** | 841 | /** |
836 | * rpc_malloc - allocate an RPC buffer | 842 | * rpc_malloc - allocate an RPC buffer |
837 | * @task: RPC task that will use this buffer | 843 | * @task: RPC task that will use this buffer |
838 | * @size: requested byte size | 844 | * @size: requested byte size |
839 | * | 845 | * |
840 | * To prevent rpciod from hanging, this allocator never sleeps, | 846 | * To prevent rpciod from hanging, this allocator never sleeps, |
841 | * returning NULL if the request cannot be serviced immediately. | 847 | * returning NULL if the request cannot be serviced immediately. |
842 | * The caller can arrange to sleep in a way that is safe for rpciod. | 848 | * The caller can arrange to sleep in a way that is safe for rpciod. |
843 | * | 849 | * |
844 | * Most requests are 'small' (under 2KiB) and can be serviced from a | 850 | * Most requests are 'small' (under 2KiB) and can be serviced from a |
845 | * mempool, ensuring that NFS reads and writes can always proceed, | 851 | * mempool, ensuring that NFS reads and writes can always proceed, |
846 | * and that there is good locality of reference for these buffers. | 852 | * and that there is good locality of reference for these buffers. |
847 | * | 853 | * |
848 | * In order to avoid memory starvation triggering more writebacks of | 854 | * In order to avoid memory starvation triggering more writebacks of |
849 | * NFS requests, we avoid using GFP_KERNEL. | 855 | * NFS requests, we avoid using GFP_KERNEL. |
850 | */ | 856 | */ |
851 | void *rpc_malloc(struct rpc_task *task, size_t size) | 857 | void *rpc_malloc(struct rpc_task *task, size_t size) |
852 | { | 858 | { |
853 | struct rpc_buffer *buf; | 859 | struct rpc_buffer *buf; |
854 | gfp_t gfp = GFP_NOWAIT; | 860 | gfp_t gfp = GFP_NOWAIT; |
855 | 861 | ||
856 | if (RPC_IS_SWAPPER(task)) | 862 | if (RPC_IS_SWAPPER(task)) |
857 | gfp |= __GFP_MEMALLOC; | 863 | gfp |= __GFP_MEMALLOC; |
858 | 864 | ||
859 | size += sizeof(struct rpc_buffer); | 865 | size += sizeof(struct rpc_buffer); |
860 | if (size <= RPC_BUFFER_MAXSIZE) | 866 | if (size <= RPC_BUFFER_MAXSIZE) |
861 | buf = mempool_alloc(rpc_buffer_mempool, gfp); | 867 | buf = mempool_alloc(rpc_buffer_mempool, gfp); |
862 | else | 868 | else |
863 | buf = kmalloc(size, gfp); | 869 | buf = kmalloc(size, gfp); |
864 | 870 | ||
865 | if (!buf) | 871 | if (!buf) |
866 | return NULL; | 872 | return NULL; |
867 | 873 | ||
868 | buf->len = size; | 874 | buf->len = size; |
869 | dprintk("RPC: %5u allocated buffer of size %zu at %p\n", | 875 | dprintk("RPC: %5u allocated buffer of size %zu at %p\n", |
870 | task->tk_pid, size, buf); | 876 | task->tk_pid, size, buf); |
871 | return &buf->data; | 877 | return &buf->data; |
872 | } | 878 | } |
873 | EXPORT_SYMBOL_GPL(rpc_malloc); | 879 | EXPORT_SYMBOL_GPL(rpc_malloc); |
874 | 880 | ||
875 | /** | 881 | /** |
876 | * rpc_free - free buffer allocated via rpc_malloc | 882 | * rpc_free - free buffer allocated via rpc_malloc |
877 | * @buffer: buffer to free | 883 | * @buffer: buffer to free |
878 | * | 884 | * |
879 | */ | 885 | */ |
880 | void rpc_free(void *buffer) | 886 | void rpc_free(void *buffer) |
881 | { | 887 | { |
882 | size_t size; | 888 | size_t size; |
883 | struct rpc_buffer *buf; | 889 | struct rpc_buffer *buf; |
884 | 890 | ||
885 | if (!buffer) | 891 | if (!buffer) |
886 | return; | 892 | return; |
887 | 893 | ||
888 | buf = container_of(buffer, struct rpc_buffer, data); | 894 | buf = container_of(buffer, struct rpc_buffer, data); |
889 | size = buf->len; | 895 | size = buf->len; |
890 | 896 | ||
891 | dprintk("RPC: freeing buffer of size %zu at %p\n", | 897 | dprintk("RPC: freeing buffer of size %zu at %p\n", |
892 | size, buf); | 898 | size, buf); |
893 | 899 | ||
894 | if (size <= RPC_BUFFER_MAXSIZE) | 900 | if (size <= RPC_BUFFER_MAXSIZE) |
895 | mempool_free(buf, rpc_buffer_mempool); | 901 | mempool_free(buf, rpc_buffer_mempool); |
896 | else | 902 | else |
897 | kfree(buf); | 903 | kfree(buf); |
898 | } | 904 | } |
899 | EXPORT_SYMBOL_GPL(rpc_free); | 905 | EXPORT_SYMBOL_GPL(rpc_free); |
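
rpc_malloc() and rpc_free() are the natural buf_alloc/buf_free hooks of a transport's rpc_xprt_ops (the socket transports wire them up this way), and the never-sleep rule above means a NULL return is handled by backing off and retrying, never by blocking rpciod. A hedged sketch; my_xprt_ops and my_allocate are hypothetical, and the 1024-byte size is an assumed placeholder.

	#include <linux/sunrpc/sched.h>
	#include <linux/sunrpc/xprt.h>

	static struct rpc_xprt_ops my_xprt_ops = {
		/* ... connect/transmit/close methods elided ... */
		.buf_alloc	= rpc_malloc,
		.buf_free	= rpc_free,
	};

	/* Hypothetical state function in the style of call_allocate(): never
	 * block rpciod; delay the task and retry if the allocator cannot
	 * serve the request right now. */
	static void my_allocate(struct rpc_task *task)
	{
		struct rpc_rqst *req = task->tk_rqstp;
		size_t size = 1024;		/* assumed request buffer size */

		req->rq_buffer = rpc_malloc(task, size);
		if (req->rq_buffer != NULL)
			return;

		task->tk_action = my_allocate;	/* run this step again... */
		rpc_delay(task, HZ >> 4);	/* ...after a short, rpciod-safe sleep */
	}
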
900 | 906 | ||
901 | /* | 907 | /* |
902 | * Creation and deletion of RPC task structures | 908 | * Creation and deletion of RPC task structures |
903 | */ | 909 | */ |
904 | static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) | 910 | static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) |
905 | { | 911 | { |
906 | memset(task, 0, sizeof(*task)); | 912 | memset(task, 0, sizeof(*task)); |
907 | atomic_set(&task->tk_count, 1); | 913 | atomic_set(&task->tk_count, 1); |
908 | task->tk_flags = task_setup_data->flags; | 914 | task->tk_flags = task_setup_data->flags; |
909 | task->tk_ops = task_setup_data->callback_ops; | 915 | task->tk_ops = task_setup_data->callback_ops; |
910 | task->tk_calldata = task_setup_data->callback_data; | 916 | task->tk_calldata = task_setup_data->callback_data; |
911 | INIT_LIST_HEAD(&task->tk_task); | 917 | INIT_LIST_HEAD(&task->tk_task); |
912 | 918 | ||
913 | task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; | 919 | task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; |
914 | task->tk_owner = current->tgid; | 920 | task->tk_owner = current->tgid; |
915 | 921 | ||
916 | /* Initialize workqueue for async tasks */ | 922 | /* Initialize workqueue for async tasks */ |
917 | task->tk_workqueue = task_setup_data->workqueue; | 923 | task->tk_workqueue = task_setup_data->workqueue; |
918 | 924 | ||
919 | if (task->tk_ops->rpc_call_prepare != NULL) | 925 | if (task->tk_ops->rpc_call_prepare != NULL) |
920 | task->tk_action = rpc_prepare_task; | 926 | task->tk_action = rpc_prepare_task; |
921 | 927 | ||
922 | rpc_init_task_statistics(task); | 928 | rpc_init_task_statistics(task); |
923 | 929 | ||
924 | dprintk("RPC: new task initialized, procpid %u\n", | 930 | dprintk("RPC: new task initialized, procpid %u\n", |
925 | task_pid_nr(current)); | 931 | task_pid_nr(current)); |
926 | } | 932 | } |
927 | 933 | ||
928 | static struct rpc_task * | 934 | static struct rpc_task * |
929 | rpc_alloc_task(void) | 935 | rpc_alloc_task(void) |
930 | { | 936 | { |
931 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO); | 937 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO); |
932 | } | 938 | } |
933 | 939 | ||
934 | /* | 940 | /* |
935 | * Create a new task for the specified client. | 941 | * Create a new task for the specified client. |
936 | */ | 942 | */ |
937 | struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) | 943 | struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) |
938 | { | 944 | { |
939 | struct rpc_task *task = setup_data->task; | 945 | struct rpc_task *task = setup_data->task; |
940 | unsigned short flags = 0; | 946 | unsigned short flags = 0; |
941 | 947 | ||
942 | if (task == NULL) { | 948 | if (task == NULL) { |
943 | task = rpc_alloc_task(); | 949 | task = rpc_alloc_task(); |
944 | if (task == NULL) { | 950 | if (task == NULL) { |
945 | rpc_release_calldata(setup_data->callback_ops, | 951 | rpc_release_calldata(setup_data->callback_ops, |
946 | setup_data->callback_data); | 952 | setup_data->callback_data); |
947 | return ERR_PTR(-ENOMEM); | 953 | return ERR_PTR(-ENOMEM); |
948 | } | 954 | } |
949 | flags = RPC_TASK_DYNAMIC; | 955 | flags = RPC_TASK_DYNAMIC; |
950 | } | 956 | } |
951 | 957 | ||
952 | rpc_init_task(task, setup_data); | 958 | rpc_init_task(task, setup_data); |
953 | task->tk_flags |= flags; | 959 | task->tk_flags |= flags; |
954 | dprintk("RPC: allocated task %p\n", task); | 960 | dprintk("RPC: allocated task %p\n", task); |
955 | return task; | 961 | return task; |
956 | } | 962 | } |
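
Note the two allocation modes above: if setup_data->task is NULL the task comes from the mempool and is flagged RPC_TASK_DYNAMIC so rpc_free_task() later returns it there, whereas a caller may instead embed the rpc_task in its own structure and pass it in. A sketch of the embedded form, with struct my_request and my_setup_embedded_task purely hypothetical:

	#include <linux/sunrpc/sched.h>

	struct my_request {
		struct rpc_task		task;	/* embedded: no mempool allocation */
		/* ... request-specific state ... */
	};

	static struct rpc_task *my_setup_embedded_task(struct my_request *req,
						       const struct rpc_call_ops *ops)
	{
		struct rpc_task_setup setup = {
			.task		= &req->task,	/* rpc_new_task() will not allocate */
			.callback_ops	= ops,
			.callback_data	= req,
			.flags		= RPC_TASK_ASYNC,
		};

		/* Returns &req->task with RPC_TASK_DYNAMIC left clear, so the
		 * task memory stays owned by struct my_request rather than by
		 * the rpc_task mempool. */
		return rpc_new_task(&setup);
	}
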
957 | 963 | ||
958 | /* | 964 | /* |
959 | * rpc_free_task - release rpc task and perform cleanups | 965 | * rpc_free_task - release rpc task and perform cleanups |
960 | * | 966 | * |
961 | * Note that we free up the rpc_task _after_ rpc_release_calldata() | 967 | * Note that we free up the rpc_task _after_ rpc_release_calldata() |
962 | * in order to work around a workqueue dependency issue. | 968 | * in order to work around a workqueue dependency issue. |
963 | * | 969 | * |
964 | * Tejun Heo states: | 970 | * Tejun Heo states: |
965 | * "Workqueue currently considers two work items to be the same if they're | 971 | * "Workqueue currently considers two work items to be the same if they're |
966 | * on the same address and won't execute them concurrently - ie. it | 972 | * on the same address and won't execute them concurrently - ie. it |
967 | * makes a work item which is queued again while being executed wait | 973 | * makes a work item which is queued again while being executed wait |
968 | * for the previous execution to complete. | 974 | * for the previous execution to complete. |
969 | * | 975 | * |
970 | * If a work function frees the work item, and then waits for an event | 976 | * If a work function frees the work item, and then waits for an event |
971 | * which should be performed by another work item and *that* work item | 977 | * which should be performed by another work item and *that* work item |
972 | * recycles the freed work item, it can create a false dependency loop. | 978 | * recycles the freed work item, it can create a false dependency loop. |
973 | * There really is no reliable way to detect this short of verifying | 979 | * There really is no reliable way to detect this short of verifying |
974 | * every memory free." | 980 | * every memory free." |
975 | * | 981 | * |
976 | */ | 982 | */ |
977 | static void rpc_free_task(struct rpc_task *task) | 983 | static void rpc_free_task(struct rpc_task *task) |
978 | { | 984 | { |
979 | unsigned short tk_flags = task->tk_flags; | 985 | unsigned short tk_flags = task->tk_flags; |
980 | 986 | ||
981 | rpc_release_calldata(task->tk_ops, task->tk_calldata); | 987 | rpc_release_calldata(task->tk_ops, task->tk_calldata); |
982 | 988 | ||
983 | if (tk_flags & RPC_TASK_DYNAMIC) { | 989 | if (tk_flags & RPC_TASK_DYNAMIC) { |
984 | dprintk("RPC: %5u freeing task\n", task->tk_pid); | 990 | dprintk("RPC: %5u freeing task\n", task->tk_pid); |
985 | mempool_free(task, rpc_task_mempool); | 991 | mempool_free(task, rpc_task_mempool); |
986 | } | 992 | } |
987 | } | 993 | } |
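
The hazard Tejun describes is easiest to see in a deliberately broken example: a work function that frees its own work item and then waits on something another work item will provide. Everything below is hypothetical and exists only to illustrate the false dependency that rpc_free_task() avoids by running rpc_release_calldata() before handing the task's memory back.

	#include <linux/completion.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct my_item {
		struct work_struct	work;
		struct completion	*done;
	};

	static void my_bad_work_fn(struct work_struct *work)
	{
		struct my_item *item = container_of(work, struct my_item, work);
		struct completion *done = item->done;

		kfree(item);			/* the work_struct's address is now free... */
		wait_for_completion(done);	/* ...and we wait on another work item.
						 * If that work item allocates a new
						 * work_struct at the recycled address
						 * and queues it on this workqueue, the
						 * workqueue treats it as this very work
						 * re-queued and makes it wait for us:
						 * a false dependency loop. */
	}
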
988 | 994 | ||
989 | static void rpc_async_release(struct work_struct *work) | 995 | static void rpc_async_release(struct work_struct *work) |
990 | { | 996 | { |
991 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); | 997 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); |
992 | } | 998 | } |
993 | 999 | ||
994 | static void rpc_release_resources_task(struct rpc_task *task) | 1000 | static void rpc_release_resources_task(struct rpc_task *task) |
995 | { | 1001 | { |
996 | xprt_release(task); | 1002 | xprt_release(task); |
997 | if (task->tk_msg.rpc_cred) { | 1003 | if (task->tk_msg.rpc_cred) { |
998 | put_rpccred(task->tk_msg.rpc_cred); | 1004 | put_rpccred(task->tk_msg.rpc_cred); |
999 | task->tk_msg.rpc_cred = NULL; | 1005 | task->tk_msg.rpc_cred = NULL; |
1000 | } | 1006 | } |
1001 | rpc_task_release_client(task); | 1007 | rpc_task_release_client(task); |
1002 | } | 1008 | } |
1003 | 1009 | ||
1004 | static void rpc_final_put_task(struct rpc_task *task, | 1010 | static void rpc_final_put_task(struct rpc_task *task, |
1005 | struct workqueue_struct *q) | 1011 | struct workqueue_struct *q) |
1006 | { | 1012 | { |
1007 | if (q != NULL) { | 1013 | if (q != NULL) { |
1008 | INIT_WORK(&task->u.tk_work, rpc_async_release); | 1014 | INIT_WORK(&task->u.tk_work, rpc_async_release); |
1009 | queue_work(q, &task->u.tk_work); | 1015 | queue_work(q, &task->u.tk_work); |
1010 | } else | 1016 | } else |
1011 | rpc_free_task(task); | 1017 | rpc_free_task(task); |
1012 | } | 1018 | } |
1013 | 1019 | ||
1014 | static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) | 1020 | static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) |
1015 | { | 1021 | { |
1016 | if (atomic_dec_and_test(&task->tk_count)) { | 1022 | if (atomic_dec_and_test(&task->tk_count)) { |
1017 | rpc_release_resources_task(task); | 1023 | rpc_release_resources_task(task); |
1018 | rpc_final_put_task(task, q); | 1024 | rpc_final_put_task(task, q); |
1019 | } | 1025 | } |
1020 | } | 1026 | } |
1021 | 1027 | ||
1022 | void rpc_put_task(struct rpc_task *task) | 1028 | void rpc_put_task(struct rpc_task *task) |
1023 | { | 1029 | { |
1024 | rpc_do_put_task(task, NULL); | 1030 | rpc_do_put_task(task, NULL); |
1025 | } | 1031 | } |
1026 | EXPORT_SYMBOL_GPL(rpc_put_task); | 1032 | EXPORT_SYMBOL_GPL(rpc_put_task); |
1027 | 1033 | ||
1028 | void rpc_put_task_async(struct rpc_task *task) | 1034 | void rpc_put_task_async(struct rpc_task *task) |
1029 | { | 1035 | { |
1030 | rpc_do_put_task(task, task->tk_workqueue); | 1036 | rpc_do_put_task(task, task->tk_workqueue); |
1031 | } | 1037 | } |
1032 | EXPORT_SYMBOL_GPL(rpc_put_task_async); | 1038 | EXPORT_SYMBOL_GPL(rpc_put_task_async); |
1033 | 1039 | ||
1034 | static void rpc_release_task(struct rpc_task *task) | 1040 | static void rpc_release_task(struct rpc_task *task) |
1035 | { | 1041 | { |
1036 | dprintk("RPC: %5u release task\n", task->tk_pid); | 1042 | dprintk("RPC: %5u release task\n", task->tk_pid); |
1037 | 1043 | ||
1038 | WARN_ON_ONCE(RPC_IS_QUEUED(task)); | 1044 | WARN_ON_ONCE(RPC_IS_QUEUED(task)); |
1039 | 1045 | ||
1040 | rpc_release_resources_task(task); | 1046 | rpc_release_resources_task(task); |
1041 | 1047 | ||
1042 | /* | 1048 | /* |
1043 | * Note: at this point we have been removed from rpc_clnt->cl_tasks, | 1049 | * Note: at this point we have been removed from rpc_clnt->cl_tasks, |
1044 | * so it should be safe to use task->tk_count as a test for whether | 1050 | * so it should be safe to use task->tk_count as a test for whether |
1045 | * or not any other processes still hold references to our rpc_task. | 1051 | * or not any other processes still hold references to our rpc_task. |
1046 | */ | 1052 | */ |
1047 | if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { | 1053 | if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { |
1048 | /* Wake up anyone who may be waiting for task completion */ | 1054 | /* Wake up anyone who may be waiting for task completion */ |
1049 | if (!rpc_complete_task(task)) | 1055 | if (!rpc_complete_task(task)) |
1050 | return; | 1056 | return; |
1051 | } else { | 1057 | } else { |
1052 | if (!atomic_dec_and_test(&task->tk_count)) | 1058 | if (!atomic_dec_and_test(&task->tk_count)) |
1053 | return; | 1059 | return; |
1054 | } | 1060 | } |
1055 | rpc_final_put_task(task, task->tk_workqueue); | 1061 | rpc_final_put_task(task, task->tk_workqueue); |
1056 | } | 1062 | } |
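
To make the `1 + !RPC_IS_ASYNC(task)` test concrete, here is a reference-count trace for a synchronous task driven with an extra caller-side reference (as in the sketch after rpc_execute() above); for an async task the scheduler expects to hold the only remaining reference, hence the expected count of 1.

	/*
	 * rpc_init_task()          tk_count = 1   (the scheduler's reference)
	 * caller: atomic_inc()     tk_count = 2   (the caller's reference)
	 * __rpc_execute() finishes and calls rpc_release_task():
	 *     tk_count == 1 + !RPC_IS_ASYNC(task) == 2, so only the caller is
	 *     left: drop the scheduler's reference, tk_count = 1, and return.
	 * caller: rpc_put_task()   tk_count = 0 -> rpc_final_put_task()
	 */
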
1057 | 1063 | ||
1058 | int rpciod_up(void) | 1064 | int rpciod_up(void) |
1059 | { | 1065 | { |
1060 | return try_module_get(THIS_MODULE) ? 0 : -EINVAL; | 1066 | return try_module_get(THIS_MODULE) ? 0 : -EINVAL; |
1061 | } | 1067 | } |
1062 | 1068 | ||
1063 | void rpciod_down(void) | 1069 | void rpciod_down(void) |
1064 | { | 1070 | { |
1065 | module_put(THIS_MODULE); | 1071 | module_put(THIS_MODULE); |
1066 | } | 1072 | } |
1067 | 1073 | ||
1068 | /* | 1074 | /* |
1069 | * Start up the rpciod workqueue. | 1075 | * Start up the rpciod workqueue. |
1070 | */ | 1076 | */ |
1071 | static int rpciod_start(void) | 1077 | static int rpciod_start(void) |
1072 | { | 1078 | { |
1073 | struct workqueue_struct *wq; | 1079 | struct workqueue_struct *wq; |
1074 | 1080 | ||
1075 | /* | 1081 | /* |
1076 |  * Create the rpciod workqueue. | 1082 |  * Create the rpciod workqueue. |
1077 | */ | 1083 | */ |
1078 | dprintk("RPC: creating workqueue rpciod\n"); | 1084 | dprintk("RPC: creating workqueue rpciod\n"); |
1079 | wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1); | 1085 | wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1); |
1080 | rpciod_workqueue = wq; | 1086 | rpciod_workqueue = wq; |
1081 | return rpciod_workqueue != NULL; | 1087 | return rpciod_workqueue != NULL; |
1082 | } | 1088 | } |
1083 | 1089 | ||
1084 | static void rpciod_stop(void) | 1090 | static void rpciod_stop(void) |
1085 | { | 1091 | { |
1086 | struct workqueue_struct *wq = NULL; | 1092 | struct workqueue_struct *wq = NULL; |
1087 | 1093 | ||
1088 | if (rpciod_workqueue == NULL) | 1094 | if (rpciod_workqueue == NULL) |
1089 | return; | 1095 | return; |
1090 | dprintk("RPC: destroying workqueue rpciod\n"); | 1096 | dprintk("RPC: destroying workqueue rpciod\n"); |
1091 | 1097 | ||
1092 | wq = rpciod_workqueue; | 1098 | wq = rpciod_workqueue; |
1093 | rpciod_workqueue = NULL; | 1099 | rpciod_workqueue = NULL; |
1094 | destroy_workqueue(wq); | 1100 | destroy_workqueue(wq); |
1095 | } | 1101 | } |
1096 | 1102 | ||
1097 | void | 1103 | void |
1098 | rpc_destroy_mempool(void) | 1104 | rpc_destroy_mempool(void) |
1099 | { | 1105 | { |
1100 | rpciod_stop(); | 1106 | rpciod_stop(); |
1101 | if (rpc_buffer_mempool) | 1107 | if (rpc_buffer_mempool) |
1102 | mempool_destroy(rpc_buffer_mempool); | 1108 | mempool_destroy(rpc_buffer_mempool); |
1103 | if (rpc_task_mempool) | 1109 | if (rpc_task_mempool) |
1104 | mempool_destroy(rpc_task_mempool); | 1110 | mempool_destroy(rpc_task_mempool); |
1105 | if (rpc_task_slabp) | 1111 | if (rpc_task_slabp) |
1106 | kmem_cache_destroy(rpc_task_slabp); | 1112 | kmem_cache_destroy(rpc_task_slabp); |
1107 | if (rpc_buffer_slabp) | 1113 | if (rpc_buffer_slabp) |
1108 | kmem_cache_destroy(rpc_buffer_slabp); | 1114 | kmem_cache_destroy(rpc_buffer_slabp); |
1109 | rpc_destroy_wait_queue(&delay_queue); | 1115 | rpc_destroy_wait_queue(&delay_queue); |
1110 | } | 1116 | } |
1111 | 1117 | ||
1112 | int | 1118 | int |
1113 | rpc_init_mempool(void) | 1119 | rpc_init_mempool(void) |
1114 | { | 1120 | { |
1115 | /* | 1121 | /* |
1116 | * The following is not strictly a mempool initialisation, | 1122 | * The following is not strictly a mempool initialisation, |
1117 | * but there is no harm in doing it here | 1123 | * but there is no harm in doing it here |
1118 | */ | 1124 | */ |
1119 | rpc_init_wait_queue(&delay_queue, "delayq"); | 1125 | rpc_init_wait_queue(&delay_queue, "delayq"); |
1120 | if (!rpciod_start()) | 1126 | if (!rpciod_start()) |
1121 | goto err_nomem; | 1127 | goto err_nomem; |
1122 | 1128 | ||
1123 | rpc_task_slabp = kmem_cache_create("rpc_tasks", | 1129 | rpc_task_slabp = kmem_cache_create("rpc_tasks", |
1124 | sizeof(struct rpc_task), | 1130 | sizeof(struct rpc_task), |
1125 | 0, SLAB_HWCACHE_ALIGN, | 1131 | 0, SLAB_HWCACHE_ALIGN, |
1126 | NULL); | 1132 | NULL); |
1127 | if (!rpc_task_slabp) | 1133 | if (!rpc_task_slabp) |
1128 | goto err_nomem; | 1134 | goto err_nomem; |
1129 | rpc_buffer_slabp = kmem_cache_create("rpc_buffers", | 1135 | rpc_buffer_slabp = kmem_cache_create("rpc_buffers", |
1130 | RPC_BUFFER_MAXSIZE, | 1136 | RPC_BUFFER_MAXSIZE, |
1131 | 0, SLAB_HWCACHE_ALIGN, | 1137 | 0, SLAB_HWCACHE_ALIGN, |
1132 | NULL); | 1138 | NULL); |
1133 | if (!rpc_buffer_slabp) | 1139 | if (!rpc_buffer_slabp) |
1134 | goto err_nomem; | 1140 | goto err_nomem; |
1135 | rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, | 1141 | rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, |
1136 | rpc_task_slabp); | 1142 | rpc_task_slabp); |
1137 | if (!rpc_task_mempool) | 1143 | if (!rpc_task_mempool) |
1138 | goto err_nomem; | 1144 | goto err_nomem; |
1139 | rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, | 1145 | rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, |
1140 | rpc_buffer_slabp); | 1146 | rpc_buffer_slabp); |
1141 | if (!rpc_buffer_mempool) | 1147 | if (!rpc_buffer_mempool) |
1142 | goto err_nomem; | 1148 | goto err_nomem; |
1143 | return 0; | 1149 | return 0; |
1144 | err_nomem: | 1150 | err_nomem: |
1145 | rpc_destroy_mempool(); | 1151 | rpc_destroy_mempool(); |
1146 | return -ENOMEM; | 1152 | return -ENOMEM; |
1147 | } | 1153 | } |
1148 | 1154 |
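
These two routines are intended to be paired by the module init/exit code (init_sunrpc()/cleanup_sunrpc() in net/sunrpc/sunrpc_syms.c); the err_nomem path simply calls rpc_destroy_mempool(), which is why every teardown step above is guarded against partially completed initialisation. A sketch of the expected pairing; my_init and my_exit are placeholders for the real registration code.

	#include <linux/module.h>
	#include <linux/sunrpc/sched.h>

	static int __init my_init(void)
	{
		int err;

		err = rpc_init_mempool();	/* also starts rpciod */
		if (err)
			return err;

		/* ... register sysctls, transports, programs, ...; on any
		 * later failure:
		 *     rpc_destroy_mempool();
		 *     return err;
		 */
		return 0;
	}

	static void __exit my_exit(void)
	{
		/* Tears down rpciod, the mempools and the slab caches. */
		rpc_destroy_mempool();
	}
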