Commit 9c9cc93ad2a5d9972672e03685af20e8cea1e5a4
Committed by Trond Myklebust
1 parent 8ae20abdd1
Exists in master and in 7 other branches
SUNRPC: remove dead variable 'rpciod_running'
rpciod_running is not used at all, but due to the way DECLARE_MUTEX_LOCKED
works we don't get a warning for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
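Why gcc stays quiet about the dead variable: DECLARE_MUTEX_LOCKED() expands to a full definition whose initializer refers back to the variable's own embedded wait-queue head, and gcc counts that self-reference as a use, so -Wunused-variable never fires for it. Below is a minimal sketch of the effect; the struct and macro names are invented for illustration and are not the real kernel semaphore definitions.

/* sketch.c - why a self-referencing initializer hides the
 * "defined but not used" warning.  All names are invented for
 * illustration; they are not the actual kernel macros.
 */
struct fake_wait_head {
	struct fake_wait_head *next, *prev;
};

struct fake_sem {
	int count;
	struct fake_wait_head wait;
};

/* The initializer points back at the object being defined, much like
 * DECLARE_MUTEX_LOCKED()/__SEMAPHORE_INITIALIZER() did. */
#define FAKE_DECLARE_SEM_LOCKED(name)					\
	struct fake_sem name = {					\
		.count = 0,						\
		.wait  = { &(name).wait, &(name).wait },		\
	}

static int plainly_unused;			/* gcc -Wunused-variable warns here */
static FAKE_DECLARE_SEM_LOCKED(never_used);	/* self-reference: no warning */

int main(void)
{
	return 0;
}

Compiling this sketch with "gcc -Wall -Wunused-variable sketch.c" should flag plainly_unused but not never_used, which is how the unused rpciod_running went unnoticed until now.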
Showing 1 changed file with 0 additions and 2 deletions (inline diff)
net/sunrpc/sched.c
1 | /* | 1 | /* |
2 | * linux/net/sunrpc/sched.c | 2 | * linux/net/sunrpc/sched.c |
3 | * | 3 | * |
4 | * Scheduling for synchronous and asynchronous RPC requests. | 4 | * Scheduling for synchronous and asynchronous RPC requests. |
5 | * | 5 | * |
6 | * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de> | 6 | * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de> |
7 | * | 7 | * |
8 | * TCP NFS related read + write fixes | 8 | * TCP NFS related read + write fixes |
9 | * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> | 9 | * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | 13 | ||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/mempool.h> | 17 | #include <linux/mempool.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/smp_lock.h> | 19 | #include <linux/smp_lock.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
22 | 22 | ||
23 | #include <linux/sunrpc/clnt.h> | 23 | #include <linux/sunrpc/clnt.h> |
24 | 24 | ||
25 | #ifdef RPC_DEBUG | 25 | #ifdef RPC_DEBUG |
26 | #define RPCDBG_FACILITY RPCDBG_SCHED | 26 | #define RPCDBG_FACILITY RPCDBG_SCHED |
27 | #define RPC_TASK_MAGIC_ID 0xf00baa | 27 | #define RPC_TASK_MAGIC_ID 0xf00baa |
28 | static int rpc_task_id; | 28 | static int rpc_task_id; |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * RPC slabs and memory pools | 32 | * RPC slabs and memory pools |
33 | */ | 33 | */ |
34 | #define RPC_BUFFER_MAXSIZE (2048) | 34 | #define RPC_BUFFER_MAXSIZE (2048) |
35 | #define RPC_BUFFER_POOLSIZE (8) | 35 | #define RPC_BUFFER_POOLSIZE (8) |
36 | #define RPC_TASK_POOLSIZE (8) | 36 | #define RPC_TASK_POOLSIZE (8) |
37 | static struct kmem_cache *rpc_task_slabp __read_mostly; | 37 | static struct kmem_cache *rpc_task_slabp __read_mostly; |
38 | static struct kmem_cache *rpc_buffer_slabp __read_mostly; | 38 | static struct kmem_cache *rpc_buffer_slabp __read_mostly; |
39 | static mempool_t *rpc_task_mempool __read_mostly; | 39 | static mempool_t *rpc_task_mempool __read_mostly; |
40 | static mempool_t *rpc_buffer_mempool __read_mostly; | 40 | static mempool_t *rpc_buffer_mempool __read_mostly; |
41 | 41 | ||
42 | static void __rpc_default_timer(struct rpc_task *task); | 42 | static void __rpc_default_timer(struct rpc_task *task); |
43 | static void rpciod_killall(void); | 43 | static void rpciod_killall(void); |
44 | static void rpc_async_schedule(struct work_struct *); | 44 | static void rpc_async_schedule(struct work_struct *); |
45 | static void rpc_release_task(struct rpc_task *task); | 45 | static void rpc_release_task(struct rpc_task *task); |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * RPC tasks sit here while waiting for conditions to improve. | 48 | * RPC tasks sit here while waiting for conditions to improve. |
49 | */ | 49 | */ |
50 | static RPC_WAITQ(delay_queue, "delayq"); | 50 | static RPC_WAITQ(delay_queue, "delayq"); |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * All RPC tasks are linked into this list | 53 | * All RPC tasks are linked into this list |
54 | */ | 54 | */ |
55 | static LIST_HEAD(all_tasks); | 55 | static LIST_HEAD(all_tasks); |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * rpciod-related stuff | 58 | * rpciod-related stuff |
59 | */ | 59 | */ |
60 | static DEFINE_MUTEX(rpciod_mutex); | 60 | static DEFINE_MUTEX(rpciod_mutex); |
61 | static unsigned int rpciod_users; | 61 | static unsigned int rpciod_users; |
62 | struct workqueue_struct *rpciod_workqueue; | 62 | struct workqueue_struct *rpciod_workqueue; |
63 | 63 | ||
64 | /* | 64 | /* |
65 | * Spinlock for other critical sections of code. | 65 | * Spinlock for other critical sections of code. |
66 | */ | 66 | */ |
67 | static DEFINE_SPINLOCK(rpc_sched_lock); | 67 | static DEFINE_SPINLOCK(rpc_sched_lock); |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * Disable the timer for a given RPC task. Should be called with | 70 | * Disable the timer for a given RPC task. Should be called with |
71 | * queue->lock and bh_disabled in order to avoid races within | 71 | * queue->lock and bh_disabled in order to avoid races within |
72 | * rpc_run_timer(). | 72 | * rpc_run_timer(). |
73 | */ | 73 | */ |
74 | static inline void | 74 | static inline void |
75 | __rpc_disable_timer(struct rpc_task *task) | 75 | __rpc_disable_timer(struct rpc_task *task) |
76 | { | 76 | { |
77 | dprintk("RPC: %5u disabling timer\n", task->tk_pid); | 77 | dprintk("RPC: %5u disabling timer\n", task->tk_pid); |
78 | task->tk_timeout_fn = NULL; | 78 | task->tk_timeout_fn = NULL; |
79 | task->tk_timeout = 0; | 79 | task->tk_timeout = 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Run a timeout function. | 83 | * Run a timeout function. |
84 | * We use the callback in order to allow __rpc_wake_up_task() | 84 | * We use the callback in order to allow __rpc_wake_up_task() |
85 | * and friends to disable the timer synchronously on SMP systems | 85 | * and friends to disable the timer synchronously on SMP systems |
86 | * without calling del_timer_sync(). The latter could cause a | 86 | * without calling del_timer_sync(). The latter could cause a |
87 | * deadlock if called while we're holding spinlocks... | 87 | * deadlock if called while we're holding spinlocks... |
88 | */ | 88 | */ |
89 | static void rpc_run_timer(struct rpc_task *task) | 89 | static void rpc_run_timer(struct rpc_task *task) |
90 | { | 90 | { |
91 | void (*callback)(struct rpc_task *); | 91 | void (*callback)(struct rpc_task *); |
92 | 92 | ||
93 | callback = task->tk_timeout_fn; | 93 | callback = task->tk_timeout_fn; |
94 | task->tk_timeout_fn = NULL; | 94 | task->tk_timeout_fn = NULL; |
95 | if (callback && RPC_IS_QUEUED(task)) { | 95 | if (callback && RPC_IS_QUEUED(task)) { |
96 | dprintk("RPC: %5u running timer\n", task->tk_pid); | 96 | dprintk("RPC: %5u running timer\n", task->tk_pid); |
97 | callback(task); | 97 | callback(task); |
98 | } | 98 | } |
99 | smp_mb__before_clear_bit(); | 99 | smp_mb__before_clear_bit(); |
100 | clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate); | 100 | clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate); |
101 | smp_mb__after_clear_bit(); | 101 | smp_mb__after_clear_bit(); |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
105 | * Set up a timer for the current task. | 105 | * Set up a timer for the current task. |
106 | */ | 106 | */ |
107 | static inline void | 107 | static inline void |
108 | __rpc_add_timer(struct rpc_task *task, rpc_action timer) | 108 | __rpc_add_timer(struct rpc_task *task, rpc_action timer) |
109 | { | 109 | { |
110 | if (!task->tk_timeout) | 110 | if (!task->tk_timeout) |
111 | return; | 111 | return; |
112 | 112 | ||
113 | dprintk("RPC: %5u setting alarm for %lu ms\n", | 113 | dprintk("RPC: %5u setting alarm for %lu ms\n", |
114 | task->tk_pid, task->tk_timeout * 1000 / HZ); | 114 | task->tk_pid, task->tk_timeout * 1000 / HZ); |
115 | 115 | ||
116 | if (timer) | 116 | if (timer) |
117 | task->tk_timeout_fn = timer; | 117 | task->tk_timeout_fn = timer; |
118 | else | 118 | else |
119 | task->tk_timeout_fn = __rpc_default_timer; | 119 | task->tk_timeout_fn = __rpc_default_timer; |
120 | set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate); | 120 | set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate); |
121 | mod_timer(&task->tk_timer, jiffies + task->tk_timeout); | 121 | mod_timer(&task->tk_timer, jiffies + task->tk_timeout); |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * Delete any timer for the current task. Because we use del_timer_sync(), | 125 | * Delete any timer for the current task. Because we use del_timer_sync(), |
126 | * this function should never be called while holding queue->lock. | 126 | * this function should never be called while holding queue->lock. |
127 | */ | 127 | */ |
128 | static void | 128 | static void |
129 | rpc_delete_timer(struct rpc_task *task) | 129 | rpc_delete_timer(struct rpc_task *task) |
130 | { | 130 | { |
131 | if (RPC_IS_QUEUED(task)) | 131 | if (RPC_IS_QUEUED(task)) |
132 | return; | 132 | return; |
133 | if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) { | 133 | if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) { |
134 | del_singleshot_timer_sync(&task->tk_timer); | 134 | del_singleshot_timer_sync(&task->tk_timer); |
135 | dprintk("RPC: %5u deleting timer\n", task->tk_pid); | 135 | dprintk("RPC: %5u deleting timer\n", task->tk_pid); |
136 | } | 136 | } |
137 | } | 137 | } |
138 | 138 | ||
139 | /* | 139 | /* |
140 | * Add new request to a priority queue. | 140 | * Add new request to a priority queue. |
141 | */ | 141 | */ |
142 | static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task) | 142 | static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task) |
143 | { | 143 | { |
144 | struct list_head *q; | 144 | struct list_head *q; |
145 | struct rpc_task *t; | 145 | struct rpc_task *t; |
146 | 146 | ||
147 | INIT_LIST_HEAD(&task->u.tk_wait.links); | 147 | INIT_LIST_HEAD(&task->u.tk_wait.links); |
148 | q = &queue->tasks[task->tk_priority]; | 148 | q = &queue->tasks[task->tk_priority]; |
149 | if (unlikely(task->tk_priority > queue->maxpriority)) | 149 | if (unlikely(task->tk_priority > queue->maxpriority)) |
150 | q = &queue->tasks[queue->maxpriority]; | 150 | q = &queue->tasks[queue->maxpriority]; |
151 | list_for_each_entry(t, q, u.tk_wait.list) { | 151 | list_for_each_entry(t, q, u.tk_wait.list) { |
152 | if (t->tk_cookie == task->tk_cookie) { | 152 | if (t->tk_cookie == task->tk_cookie) { |
153 | list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); | 153 | list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); |
154 | return; | 154 | return; |
155 | } | 155 | } |
156 | } | 156 | } |
157 | list_add_tail(&task->u.tk_wait.list, q); | 157 | list_add_tail(&task->u.tk_wait.list, q); |
158 | } | 158 | } |
159 | 159 | ||
160 | /* | 160 | /* |
161 | * Add new request to wait queue. | 161 | * Add new request to wait queue. |
162 | * | 162 | * |
163 | * Swapper tasks always get inserted at the head of the queue. | 163 | * Swapper tasks always get inserted at the head of the queue. |
164 | * This should avoid many nasty memory deadlocks and hopefully | 164 | * This should avoid many nasty memory deadlocks and hopefully |
165 | * improve overall performance. | 165 | * improve overall performance. |
166 | * Everyone else gets appended to the queue to ensure proper FIFO behavior. | 166 | * Everyone else gets appended to the queue to ensure proper FIFO behavior. |
167 | */ | 167 | */ |
168 | static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) | 168 | static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) |
169 | { | 169 | { |
170 | BUG_ON (RPC_IS_QUEUED(task)); | 170 | BUG_ON (RPC_IS_QUEUED(task)); |
171 | 171 | ||
172 | if (RPC_IS_PRIORITY(queue)) | 172 | if (RPC_IS_PRIORITY(queue)) |
173 | __rpc_add_wait_queue_priority(queue, task); | 173 | __rpc_add_wait_queue_priority(queue, task); |
174 | else if (RPC_IS_SWAPPER(task)) | 174 | else if (RPC_IS_SWAPPER(task)) |
175 | list_add(&task->u.tk_wait.list, &queue->tasks[0]); | 175 | list_add(&task->u.tk_wait.list, &queue->tasks[0]); |
176 | else | 176 | else |
177 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); | 177 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); |
178 | task->u.tk_wait.rpc_waitq = queue; | 178 | task->u.tk_wait.rpc_waitq = queue; |
179 | queue->qlen++; | 179 | queue->qlen++; |
180 | rpc_set_queued(task); | 180 | rpc_set_queued(task); |
181 | 181 | ||
182 | dprintk("RPC: %5u added to queue %p \"%s\"\n", | 182 | dprintk("RPC: %5u added to queue %p \"%s\"\n", |
183 | task->tk_pid, queue, rpc_qname(queue)); | 183 | task->tk_pid, queue, rpc_qname(queue)); |
184 | } | 184 | } |
185 | 185 | ||
186 | /* | 186 | /* |
187 | * Remove request from a priority queue. | 187 | * Remove request from a priority queue. |
188 | */ | 188 | */ |
189 | static void __rpc_remove_wait_queue_priority(struct rpc_task *task) | 189 | static void __rpc_remove_wait_queue_priority(struct rpc_task *task) |
190 | { | 190 | { |
191 | struct rpc_task *t; | 191 | struct rpc_task *t; |
192 | 192 | ||
193 | if (!list_empty(&task->u.tk_wait.links)) { | 193 | if (!list_empty(&task->u.tk_wait.links)) { |
194 | t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list); | 194 | t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list); |
195 | list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); | 195 | list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); |
196 | list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); | 196 | list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); |
197 | } | 197 | } |
198 | list_del(&task->u.tk_wait.list); | 198 | list_del(&task->u.tk_wait.list); |
199 | } | 199 | } |
200 | 200 | ||
201 | /* | 201 | /* |
202 | * Remove request from queue. | 202 | * Remove request from queue. |
203 | * Note: must be called with spin lock held. | 203 | * Note: must be called with spin lock held. |
204 | */ | 204 | */ |
205 | static void __rpc_remove_wait_queue(struct rpc_task *task) | 205 | static void __rpc_remove_wait_queue(struct rpc_task *task) |
206 | { | 206 | { |
207 | struct rpc_wait_queue *queue; | 207 | struct rpc_wait_queue *queue; |
208 | queue = task->u.tk_wait.rpc_waitq; | 208 | queue = task->u.tk_wait.rpc_waitq; |
209 | 209 | ||
210 | if (RPC_IS_PRIORITY(queue)) | 210 | if (RPC_IS_PRIORITY(queue)) |
211 | __rpc_remove_wait_queue_priority(task); | 211 | __rpc_remove_wait_queue_priority(task); |
212 | else | 212 | else |
213 | list_del(&task->u.tk_wait.list); | 213 | list_del(&task->u.tk_wait.list); |
214 | queue->qlen--; | 214 | queue->qlen--; |
215 | dprintk("RPC: %5u removed from queue %p \"%s\"\n", | 215 | dprintk("RPC: %5u removed from queue %p \"%s\"\n", |
216 | task->tk_pid, queue, rpc_qname(queue)); | 216 | task->tk_pid, queue, rpc_qname(queue)); |
217 | } | 217 | } |
218 | 218 | ||
219 | static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) | 219 | static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) |
220 | { | 220 | { |
221 | queue->priority = priority; | 221 | queue->priority = priority; |
222 | queue->count = 1 << (priority * 2); | 222 | queue->count = 1 << (priority * 2); |
223 | } | 223 | } |
224 | 224 | ||
225 | static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie) | 225 | static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie) |
226 | { | 226 | { |
227 | queue->cookie = cookie; | 227 | queue->cookie = cookie; |
228 | queue->nr = RPC_BATCH_COUNT; | 228 | queue->nr = RPC_BATCH_COUNT; |
229 | } | 229 | } |
230 | 230 | ||
231 | static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) | 231 | static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) |
232 | { | 232 | { |
233 | rpc_set_waitqueue_priority(queue, queue->maxpriority); | 233 | rpc_set_waitqueue_priority(queue, queue->maxpriority); |
234 | rpc_set_waitqueue_cookie(queue, 0); | 234 | rpc_set_waitqueue_cookie(queue, 0); |
235 | } | 235 | } |
236 | 236 | ||
237 | static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio) | 237 | static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio) |
238 | { | 238 | { |
239 | int i; | 239 | int i; |
240 | 240 | ||
241 | spin_lock_init(&queue->lock); | 241 | spin_lock_init(&queue->lock); |
242 | for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) | 242 | for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) |
243 | INIT_LIST_HEAD(&queue->tasks[i]); | 243 | INIT_LIST_HEAD(&queue->tasks[i]); |
244 | queue->maxpriority = maxprio; | 244 | queue->maxpriority = maxprio; |
245 | rpc_reset_waitqueue_priority(queue); | 245 | rpc_reset_waitqueue_priority(queue); |
246 | #ifdef RPC_DEBUG | 246 | #ifdef RPC_DEBUG |
247 | queue->name = qname; | 247 | queue->name = qname; |
248 | #endif | 248 | #endif |
249 | } | 249 | } |
250 | 250 | ||
251 | void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname) | 251 | void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname) |
252 | { | 252 | { |
253 | __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH); | 253 | __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH); |
254 | } | 254 | } |
255 | 255 | ||
256 | void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) | 256 | void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) |
257 | { | 257 | { |
258 | __rpc_init_priority_wait_queue(queue, qname, 0); | 258 | __rpc_init_priority_wait_queue(queue, qname, 0); |
259 | } | 259 | } |
260 | EXPORT_SYMBOL(rpc_init_wait_queue); | 260 | EXPORT_SYMBOL(rpc_init_wait_queue); |
261 | 261 | ||
262 | static int rpc_wait_bit_interruptible(void *word) | 262 | static int rpc_wait_bit_interruptible(void *word) |
263 | { | 263 | { |
264 | if (signal_pending(current)) | 264 | if (signal_pending(current)) |
265 | return -ERESTARTSYS; | 265 | return -ERESTARTSYS; |
266 | schedule(); | 266 | schedule(); |
267 | return 0; | 267 | return 0; |
268 | } | 268 | } |
269 | 269 | ||
270 | static void rpc_set_active(struct rpc_task *task) | 270 | static void rpc_set_active(struct rpc_task *task) |
271 | { | 271 | { |
272 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) | 272 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) |
273 | return; | 273 | return; |
274 | spin_lock(&rpc_sched_lock); | 274 | spin_lock(&rpc_sched_lock); |
275 | #ifdef RPC_DEBUG | 275 | #ifdef RPC_DEBUG |
276 | task->tk_magic = RPC_TASK_MAGIC_ID; | 276 | task->tk_magic = RPC_TASK_MAGIC_ID; |
277 | task->tk_pid = rpc_task_id++; | 277 | task->tk_pid = rpc_task_id++; |
278 | #endif | 278 | #endif |
279 | /* Add to global list of all tasks */ | 279 | /* Add to global list of all tasks */ |
280 | list_add_tail(&task->tk_task, &all_tasks); | 280 | list_add_tail(&task->tk_task, &all_tasks); |
281 | spin_unlock(&rpc_sched_lock); | 281 | spin_unlock(&rpc_sched_lock); |
282 | } | 282 | } |
283 | 283 | ||
284 | /* | 284 | /* |
285 | * Mark an RPC call as having completed by clearing the 'active' bit | 285 | * Mark an RPC call as having completed by clearing the 'active' bit |
286 | */ | 286 | */ |
287 | static void rpc_mark_complete_task(struct rpc_task *task) | 287 | static void rpc_mark_complete_task(struct rpc_task *task) |
288 | { | 288 | { |
289 | smp_mb__before_clear_bit(); | 289 | smp_mb__before_clear_bit(); |
290 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | 290 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
291 | smp_mb__after_clear_bit(); | 291 | smp_mb__after_clear_bit(); |
292 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); | 292 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); |
293 | } | 293 | } |
294 | 294 | ||
295 | /* | 295 | /* |
296 | * Allow callers to wait for completion of an RPC call | 296 | * Allow callers to wait for completion of an RPC call |
297 | */ | 297 | */ |
298 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) | 298 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) |
299 | { | 299 | { |
300 | if (action == NULL) | 300 | if (action == NULL) |
301 | action = rpc_wait_bit_interruptible; | 301 | action = rpc_wait_bit_interruptible; |
302 | return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, | 302 | return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, |
303 | action, TASK_INTERRUPTIBLE); | 303 | action, TASK_INTERRUPTIBLE); |
304 | } | 304 | } |
305 | EXPORT_SYMBOL(__rpc_wait_for_completion_task); | 305 | EXPORT_SYMBOL(__rpc_wait_for_completion_task); |
306 | 306 | ||
307 | /* | 307 | /* |
308 | * Make an RPC task runnable. | 308 | * Make an RPC task runnable. |
309 | * | 309 | * |
310 | * Note: If the task is ASYNC, this must be called with | 310 | * Note: If the task is ASYNC, this must be called with |
311 | * the spinlock held to protect the wait queue operation. | 311 | * the spinlock held to protect the wait queue operation. |
312 | */ | 312 | */ |
313 | static void rpc_make_runnable(struct rpc_task *task) | 313 | static void rpc_make_runnable(struct rpc_task *task) |
314 | { | 314 | { |
315 | BUG_ON(task->tk_timeout_fn); | 315 | BUG_ON(task->tk_timeout_fn); |
316 | rpc_clear_queued(task); | 316 | rpc_clear_queued(task); |
317 | if (rpc_test_and_set_running(task)) | 317 | if (rpc_test_and_set_running(task)) |
318 | return; | 318 | return; |
319 | /* We might have raced */ | 319 | /* We might have raced */ |
320 | if (RPC_IS_QUEUED(task)) { | 320 | if (RPC_IS_QUEUED(task)) { |
321 | rpc_clear_running(task); | 321 | rpc_clear_running(task); |
322 | return; | 322 | return; |
323 | } | 323 | } |
324 | if (RPC_IS_ASYNC(task)) { | 324 | if (RPC_IS_ASYNC(task)) { |
325 | int status; | 325 | int status; |
326 | 326 | ||
327 | INIT_WORK(&task->u.tk_work, rpc_async_schedule); | 327 | INIT_WORK(&task->u.tk_work, rpc_async_schedule); |
328 | status = queue_work(task->tk_workqueue, &task->u.tk_work); | 328 | status = queue_work(task->tk_workqueue, &task->u.tk_work); |
329 | if (status < 0) { | 329 | if (status < 0) { |
330 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); | 330 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); |
331 | task->tk_status = status; | 331 | task->tk_status = status; |
332 | return; | 332 | return; |
333 | } | 333 | } |
334 | } else | 334 | } else |
335 | wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); | 335 | wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); |
336 | } | 336 | } |
337 | 337 | ||
338 | /* | 338 | /* |
339 | * Prepare for sleeping on a wait queue. | 339 | * Prepare for sleeping on a wait queue. |
340 | * By always appending tasks to the list we ensure FIFO behavior. | 340 | * By always appending tasks to the list we ensure FIFO behavior. |
341 | * NB: An RPC task will only receive interrupt-driven events as long | 341 | * NB: An RPC task will only receive interrupt-driven events as long |
342 | * as it's on a wait queue. | 342 | * as it's on a wait queue. |
343 | */ | 343 | */ |
344 | static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | 344 | static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, |
345 | rpc_action action, rpc_action timer) | 345 | rpc_action action, rpc_action timer) |
346 | { | 346 | { |
347 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", | 347 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", |
348 | task->tk_pid, rpc_qname(q), jiffies); | 348 | task->tk_pid, rpc_qname(q), jiffies); |
349 | 349 | ||
350 | if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) { | 350 | if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) { |
351 | printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n"); | 351 | printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n"); |
352 | return; | 352 | return; |
353 | } | 353 | } |
354 | 354 | ||
355 | __rpc_add_wait_queue(q, task); | 355 | __rpc_add_wait_queue(q, task); |
356 | 356 | ||
357 | BUG_ON(task->tk_callback != NULL); | 357 | BUG_ON(task->tk_callback != NULL); |
358 | task->tk_callback = action; | 358 | task->tk_callback = action; |
359 | __rpc_add_timer(task, timer); | 359 | __rpc_add_timer(task, timer); |
360 | } | 360 | } |
361 | 361 | ||
362 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | 362 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, |
363 | rpc_action action, rpc_action timer) | 363 | rpc_action action, rpc_action timer) |
364 | { | 364 | { |
365 | /* Mark the task as being activated if so needed */ | 365 | /* Mark the task as being activated if so needed */ |
366 | rpc_set_active(task); | 366 | rpc_set_active(task); |
367 | 367 | ||
368 | /* | 368 | /* |
369 | * Protect the queue operations. | 369 | * Protect the queue operations. |
370 | */ | 370 | */ |
371 | spin_lock_bh(&q->lock); | 371 | spin_lock_bh(&q->lock); |
372 | __rpc_sleep_on(q, task, action, timer); | 372 | __rpc_sleep_on(q, task, action, timer); |
373 | spin_unlock_bh(&q->lock); | 373 | spin_unlock_bh(&q->lock); |
374 | } | 374 | } |
375 | 375 | ||
376 | /** | 376 | /** |
377 | * __rpc_do_wake_up_task - wake up a single rpc_task | 377 | * __rpc_do_wake_up_task - wake up a single rpc_task |
378 | * @task: task to be woken up | 378 | * @task: task to be woken up |
379 | * | 379 | * |
380 | * Caller must hold queue->lock, and have cleared the task queued flag. | 380 | * Caller must hold queue->lock, and have cleared the task queued flag. |
381 | */ | 381 | */ |
382 | static void __rpc_do_wake_up_task(struct rpc_task *task) | 382 | static void __rpc_do_wake_up_task(struct rpc_task *task) |
383 | { | 383 | { |
384 | dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", | 384 | dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", |
385 | task->tk_pid, jiffies); | 385 | task->tk_pid, jiffies); |
386 | 386 | ||
387 | #ifdef RPC_DEBUG | 387 | #ifdef RPC_DEBUG |
388 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | 388 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); |
389 | #endif | 389 | #endif |
390 | /* Has the task been executed yet? If not, we cannot wake it up! */ | 390 | /* Has the task been executed yet? If not, we cannot wake it up! */ |
391 | if (!RPC_IS_ACTIVATED(task)) { | 391 | if (!RPC_IS_ACTIVATED(task)) { |
392 | printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); | 392 | printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); |
393 | return; | 393 | return; |
394 | } | 394 | } |
395 | 395 | ||
396 | __rpc_disable_timer(task); | 396 | __rpc_disable_timer(task); |
397 | __rpc_remove_wait_queue(task); | 397 | __rpc_remove_wait_queue(task); |
398 | 398 | ||
399 | rpc_make_runnable(task); | 399 | rpc_make_runnable(task); |
400 | 400 | ||
401 | dprintk("RPC: __rpc_wake_up_task done\n"); | 401 | dprintk("RPC: __rpc_wake_up_task done\n"); |
402 | } | 402 | } |
403 | 403 | ||
404 | /* | 404 | /* |
405 | * Wake up the specified task | 405 | * Wake up the specified task |
406 | */ | 406 | */ |
407 | static void __rpc_wake_up_task(struct rpc_task *task) | 407 | static void __rpc_wake_up_task(struct rpc_task *task) |
408 | { | 408 | { |
409 | if (rpc_start_wakeup(task)) { | 409 | if (rpc_start_wakeup(task)) { |
410 | if (RPC_IS_QUEUED(task)) | 410 | if (RPC_IS_QUEUED(task)) |
411 | __rpc_do_wake_up_task(task); | 411 | __rpc_do_wake_up_task(task); |
412 | rpc_finish_wakeup(task); | 412 | rpc_finish_wakeup(task); |
413 | } | 413 | } |
414 | } | 414 | } |
415 | 415 | ||
416 | /* | 416 | /* |
417 | * Default timeout handler if none specified by user | 417 | * Default timeout handler if none specified by user |
418 | */ | 418 | */ |
419 | static void | 419 | static void |
420 | __rpc_default_timer(struct rpc_task *task) | 420 | __rpc_default_timer(struct rpc_task *task) |
421 | { | 421 | { |
422 | dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid); | 422 | dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid); |
423 | task->tk_status = -ETIMEDOUT; | 423 | task->tk_status = -ETIMEDOUT; |
424 | rpc_wake_up_task(task); | 424 | rpc_wake_up_task(task); |
425 | } | 425 | } |
426 | 426 | ||
427 | /* | 427 | /* |
428 | * Wake up the specified task | 428 | * Wake up the specified task |
429 | */ | 429 | */ |
430 | void rpc_wake_up_task(struct rpc_task *task) | 430 | void rpc_wake_up_task(struct rpc_task *task) |
431 | { | 431 | { |
432 | rcu_read_lock_bh(); | 432 | rcu_read_lock_bh(); |
433 | if (rpc_start_wakeup(task)) { | 433 | if (rpc_start_wakeup(task)) { |
434 | if (RPC_IS_QUEUED(task)) { | 434 | if (RPC_IS_QUEUED(task)) { |
435 | struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq; | 435 | struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq; |
436 | 436 | ||
437 | /* Note: we're already in a bh-safe context */ | 437 | /* Note: we're already in a bh-safe context */ |
438 | spin_lock(&queue->lock); | 438 | spin_lock(&queue->lock); |
439 | __rpc_do_wake_up_task(task); | 439 | __rpc_do_wake_up_task(task); |
440 | spin_unlock(&queue->lock); | 440 | spin_unlock(&queue->lock); |
441 | } | 441 | } |
442 | rpc_finish_wakeup(task); | 442 | rpc_finish_wakeup(task); |
443 | } | 443 | } |
444 | rcu_read_unlock_bh(); | 444 | rcu_read_unlock_bh(); |
445 | } | 445 | } |
446 | 446 | ||
447 | /* | 447 | /* |
448 | * Wake up the next task on a priority queue. | 448 | * Wake up the next task on a priority queue. |
449 | */ | 449 | */ |
450 | static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue) | 450 | static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue) |
451 | { | 451 | { |
452 | struct list_head *q; | 452 | struct list_head *q; |
453 | struct rpc_task *task; | 453 | struct rpc_task *task; |
454 | 454 | ||
455 | /* | 455 | /* |
456 | * Service a batch of tasks from a single cookie. | 456 | * Service a batch of tasks from a single cookie. |
457 | */ | 457 | */ |
458 | q = &queue->tasks[queue->priority]; | 458 | q = &queue->tasks[queue->priority]; |
459 | if (!list_empty(q)) { | 459 | if (!list_empty(q)) { |
460 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); | 460 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); |
461 | if (queue->cookie == task->tk_cookie) { | 461 | if (queue->cookie == task->tk_cookie) { |
462 | if (--queue->nr) | 462 | if (--queue->nr) |
463 | goto out; | 463 | goto out; |
464 | list_move_tail(&task->u.tk_wait.list, q); | 464 | list_move_tail(&task->u.tk_wait.list, q); |
465 | } | 465 | } |
466 | /* | 466 | /* |
467 | * Check if we need to switch queues. | 467 | * Check if we need to switch queues. |
468 | */ | 468 | */ |
469 | if (--queue->count) | 469 | if (--queue->count) |
470 | goto new_cookie; | 470 | goto new_cookie; |
471 | } | 471 | } |
472 | 472 | ||
473 | /* | 473 | /* |
474 | * Service the next queue. | 474 | * Service the next queue. |
475 | */ | 475 | */ |
476 | do { | 476 | do { |
477 | if (q == &queue->tasks[0]) | 477 | if (q == &queue->tasks[0]) |
478 | q = &queue->tasks[queue->maxpriority]; | 478 | q = &queue->tasks[queue->maxpriority]; |
479 | else | 479 | else |
480 | q = q - 1; | 480 | q = q - 1; |
481 | if (!list_empty(q)) { | 481 | if (!list_empty(q)) { |
482 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); | 482 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); |
483 | goto new_queue; | 483 | goto new_queue; |
484 | } | 484 | } |
485 | } while (q != &queue->tasks[queue->priority]); | 485 | } while (q != &queue->tasks[queue->priority]); |
486 | 486 | ||
487 | rpc_reset_waitqueue_priority(queue); | 487 | rpc_reset_waitqueue_priority(queue); |
488 | return NULL; | 488 | return NULL; |
489 | 489 | ||
490 | new_queue: | 490 | new_queue: |
491 | rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); | 491 | rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); |
492 | new_cookie: | 492 | new_cookie: |
493 | rpc_set_waitqueue_cookie(queue, task->tk_cookie); | 493 | rpc_set_waitqueue_cookie(queue, task->tk_cookie); |
494 | out: | 494 | out: |
495 | __rpc_wake_up_task(task); | 495 | __rpc_wake_up_task(task); |
496 | return task; | 496 | return task; |
497 | } | 497 | } |
498 | 498 | ||
499 | /* | 499 | /* |
500 | * Wake up the next task on the wait queue. | 500 | * Wake up the next task on the wait queue. |
501 | */ | 501 | */ |
502 | struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) | 502 | struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) |
503 | { | 503 | { |
504 | struct rpc_task *task = NULL; | 504 | struct rpc_task *task = NULL; |
505 | 505 | ||
506 | dprintk("RPC: wake_up_next(%p \"%s\")\n", | 506 | dprintk("RPC: wake_up_next(%p \"%s\")\n", |
507 | queue, rpc_qname(queue)); | 507 | queue, rpc_qname(queue)); |
508 | rcu_read_lock_bh(); | 508 | rcu_read_lock_bh(); |
509 | spin_lock(&queue->lock); | 509 | spin_lock(&queue->lock); |
510 | if (RPC_IS_PRIORITY(queue)) | 510 | if (RPC_IS_PRIORITY(queue)) |
511 | task = __rpc_wake_up_next_priority(queue); | 511 | task = __rpc_wake_up_next_priority(queue); |
512 | else { | 512 | else { |
513 | task_for_first(task, &queue->tasks[0]) | 513 | task_for_first(task, &queue->tasks[0]) |
514 | __rpc_wake_up_task(task); | 514 | __rpc_wake_up_task(task); |
515 | } | 515 | } |
516 | spin_unlock(&queue->lock); | 516 | spin_unlock(&queue->lock); |
517 | rcu_read_unlock_bh(); | 517 | rcu_read_unlock_bh(); |
518 | 518 | ||
519 | return task; | 519 | return task; |
520 | } | 520 | } |
521 | 521 | ||
522 | /** | 522 | /** |
523 | * rpc_wake_up - wake up all rpc_tasks | 523 | * rpc_wake_up - wake up all rpc_tasks |
524 | * @queue: rpc_wait_queue on which the tasks are sleeping | 524 | * @queue: rpc_wait_queue on which the tasks are sleeping |
525 | * | 525 | * |
526 | * Grabs queue->lock | 526 | * Grabs queue->lock |
527 | */ | 527 | */ |
528 | void rpc_wake_up(struct rpc_wait_queue *queue) | 528 | void rpc_wake_up(struct rpc_wait_queue *queue) |
529 | { | 529 | { |
530 | struct rpc_task *task, *next; | 530 | struct rpc_task *task, *next; |
531 | struct list_head *head; | 531 | struct list_head *head; |
532 | 532 | ||
533 | rcu_read_lock_bh(); | 533 | rcu_read_lock_bh(); |
534 | spin_lock(&queue->lock); | 534 | spin_lock(&queue->lock); |
535 | head = &queue->tasks[queue->maxpriority]; | 535 | head = &queue->tasks[queue->maxpriority]; |
536 | for (;;) { | 536 | for (;;) { |
537 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) | 537 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) |
538 | __rpc_wake_up_task(task); | 538 | __rpc_wake_up_task(task); |
539 | if (head == &queue->tasks[0]) | 539 | if (head == &queue->tasks[0]) |
540 | break; | 540 | break; |
541 | head--; | 541 | head--; |
542 | } | 542 | } |
543 | spin_unlock(&queue->lock); | 543 | spin_unlock(&queue->lock); |
544 | rcu_read_unlock_bh(); | 544 | rcu_read_unlock_bh(); |
545 | } | 545 | } |
546 | 546 | ||
547 | /** | 547 | /** |
548 | * rpc_wake_up_status - wake up all rpc_tasks and set their status value. | 548 | * rpc_wake_up_status - wake up all rpc_tasks and set their status value. |
549 | * @queue: rpc_wait_queue on which the tasks are sleeping | 549 | * @queue: rpc_wait_queue on which the tasks are sleeping |
550 | * @status: status value to set | 550 | * @status: status value to set |
551 | * | 551 | * |
552 | * Grabs queue->lock | 552 | * Grabs queue->lock |
553 | */ | 553 | */ |
554 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) | 554 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) |
555 | { | 555 | { |
556 | struct rpc_task *task, *next; | 556 | struct rpc_task *task, *next; |
557 | struct list_head *head; | 557 | struct list_head *head; |
558 | 558 | ||
559 | rcu_read_lock_bh(); | 559 | rcu_read_lock_bh(); |
560 | spin_lock(&queue->lock); | 560 | spin_lock(&queue->lock); |
561 | head = &queue->tasks[queue->maxpriority]; | 561 | head = &queue->tasks[queue->maxpriority]; |
562 | for (;;) { | 562 | for (;;) { |
563 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) { | 563 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) { |
564 | task->tk_status = status; | 564 | task->tk_status = status; |
565 | __rpc_wake_up_task(task); | 565 | __rpc_wake_up_task(task); |
566 | } | 566 | } |
567 | if (head == &queue->tasks[0]) | 567 | if (head == &queue->tasks[0]) |
568 | break; | 568 | break; |
569 | head--; | 569 | head--; |
570 | } | 570 | } |
571 | spin_unlock(&queue->lock); | 571 | spin_unlock(&queue->lock); |
572 | rcu_read_unlock_bh(); | 572 | rcu_read_unlock_bh(); |
573 | } | 573 | } |
574 | 574 | ||
575 | static void __rpc_atrun(struct rpc_task *task) | 575 | static void __rpc_atrun(struct rpc_task *task) |
576 | { | 576 | { |
577 | rpc_wake_up_task(task); | 577 | rpc_wake_up_task(task); |
578 | } | 578 | } |
579 | 579 | ||
580 | /* | 580 | /* |
581 | * Run a task at a later time | 581 | * Run a task at a later time |
582 | */ | 582 | */ |
583 | void rpc_delay(struct rpc_task *task, unsigned long delay) | 583 | void rpc_delay(struct rpc_task *task, unsigned long delay) |
584 | { | 584 | { |
585 | task->tk_timeout = delay; | 585 | task->tk_timeout = delay; |
586 | rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun); | 586 | rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun); |
587 | } | 587 | } |
588 | 588 | ||
589 | /* | 589 | /* |
590 | * Helper to call task->tk_ops->rpc_call_prepare | 590 | * Helper to call task->tk_ops->rpc_call_prepare |
591 | */ | 591 | */ |
592 | static void rpc_prepare_task(struct rpc_task *task) | 592 | static void rpc_prepare_task(struct rpc_task *task) |
593 | { | 593 | { |
594 | lock_kernel(); | 594 | lock_kernel(); |
595 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); | 595 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); |
596 | unlock_kernel(); | 596 | unlock_kernel(); |
597 | } | 597 | } |
598 | 598 | ||
599 | /* | 599 | /* |
600 | * Helper that calls task->tk_ops->rpc_call_done if it exists | 600 | * Helper that calls task->tk_ops->rpc_call_done if it exists |
601 | */ | 601 | */ |
602 | void rpc_exit_task(struct rpc_task *task) | 602 | void rpc_exit_task(struct rpc_task *task) |
603 | { | 603 | { |
604 | task->tk_action = NULL; | 604 | task->tk_action = NULL; |
605 | if (task->tk_ops->rpc_call_done != NULL) { | 605 | if (task->tk_ops->rpc_call_done != NULL) { |
606 | lock_kernel(); | 606 | lock_kernel(); |
607 | task->tk_ops->rpc_call_done(task, task->tk_calldata); | 607 | task->tk_ops->rpc_call_done(task, task->tk_calldata); |
608 | unlock_kernel(); | 608 | unlock_kernel(); |
609 | if (task->tk_action != NULL) { | 609 | if (task->tk_action != NULL) { |
610 | WARN_ON(RPC_ASSASSINATED(task)); | 610 | WARN_ON(RPC_ASSASSINATED(task)); |
611 | /* Always release the RPC slot and buffer memory */ | 611 | /* Always release the RPC slot and buffer memory */ |
612 | xprt_release(task); | 612 | xprt_release(task); |
613 | } | 613 | } |
614 | } | 614 | } |
615 | } | 615 | } |
616 | EXPORT_SYMBOL(rpc_exit_task); | 616 | EXPORT_SYMBOL(rpc_exit_task); |
617 | 617 | ||
618 | void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) | 618 | void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) |
619 | { | 619 | { |
620 | if (ops->rpc_release != NULL) { | 620 | if (ops->rpc_release != NULL) { |
621 | lock_kernel(); | 621 | lock_kernel(); |
622 | ops->rpc_release(calldata); | 622 | ops->rpc_release(calldata); |
623 | unlock_kernel(); | 623 | unlock_kernel(); |
624 | } | 624 | } |
625 | } | 625 | } |
626 | 626 | ||
627 | /* | 627 | /* |
628 | * This is the RPC `scheduler' (or rather, the finite state machine). | 628 | * This is the RPC `scheduler' (or rather, the finite state machine). |
629 | */ | 629 | */ |
630 | static void __rpc_execute(struct rpc_task *task) | 630 | static void __rpc_execute(struct rpc_task *task) |
631 | { | 631 | { |
632 | int status = 0; | 632 | int status = 0; |
633 | 633 | ||
634 | dprintk("RPC: %5u __rpc_execute flags=0x%x\n", | 634 | dprintk("RPC: %5u __rpc_execute flags=0x%x\n", |
635 | task->tk_pid, task->tk_flags); | 635 | task->tk_pid, task->tk_flags); |
636 | 636 | ||
637 | BUG_ON(RPC_IS_QUEUED(task)); | 637 | BUG_ON(RPC_IS_QUEUED(task)); |
638 | 638 | ||
639 | for (;;) { | 639 | for (;;) { |
640 | /* | 640 | /* |
641 | * Garbage collection of pending timers... | 641 | * Garbage collection of pending timers... |
642 | */ | 642 | */ |
643 | rpc_delete_timer(task); | 643 | rpc_delete_timer(task); |
644 | 644 | ||
645 | /* | 645 | /* |
646 | * Execute any pending callback. | 646 | * Execute any pending callback. |
647 | */ | 647 | */ |
648 | if (RPC_DO_CALLBACK(task)) { | 648 | if (RPC_DO_CALLBACK(task)) { |
649 | /* Define a callback save pointer */ | 649 | /* Define a callback save pointer */ |
650 | void (*save_callback)(struct rpc_task *); | 650 | void (*save_callback)(struct rpc_task *); |
651 | 651 | ||
652 | /* | 652 | /* |
653 | * If a callback exists, save it, reset it, | 653 | * If a callback exists, save it, reset it, |
654 | * call it. | 654 | * call it. |
655 | * The save is needed to stop from resetting | 655 | * The save is needed to stop from resetting |
656 | * another callback set within the callback handler | 656 | * another callback set within the callback handler |
657 | * - Dave | 657 | * - Dave |
658 | */ | 658 | */ |
659 | save_callback=task->tk_callback; | 659 | save_callback=task->tk_callback; |
660 | task->tk_callback=NULL; | 660 | task->tk_callback=NULL; |
661 | save_callback(task); | 661 | save_callback(task); |
662 | } | 662 | } |
663 | 663 | ||
664 | /* | 664 | /* |
665 | * Perform the next FSM step. | 665 | * Perform the next FSM step. |
666 | * tk_action may be NULL when the task has been killed | 666 | * tk_action may be NULL when the task has been killed |
667 | * by someone else. | 667 | * by someone else. |
668 | */ | 668 | */ |
669 | if (!RPC_IS_QUEUED(task)) { | 669 | if (!RPC_IS_QUEUED(task)) { |
670 | if (task->tk_action == NULL) | 670 | if (task->tk_action == NULL) |
671 | break; | 671 | break; |
672 | task->tk_action(task); | 672 | task->tk_action(task); |
673 | } | 673 | } |
674 | 674 | ||
675 | /* | 675 | /* |
676 | * Lockless check for whether task is sleeping or not. | 676 | * Lockless check for whether task is sleeping or not. |
677 | */ | 677 | */ |
678 | if (!RPC_IS_QUEUED(task)) | 678 | if (!RPC_IS_QUEUED(task)) |
679 | continue; | 679 | continue; |
680 | rpc_clear_running(task); | 680 | rpc_clear_running(task); |
681 | if (RPC_IS_ASYNC(task)) { | 681 | if (RPC_IS_ASYNC(task)) { |
682 | /* Careful! we may have raced... */ | 682 | /* Careful! we may have raced... */ |
683 | if (RPC_IS_QUEUED(task)) | 683 | if (RPC_IS_QUEUED(task)) |
684 | return; | 684 | return; |
685 | if (rpc_test_and_set_running(task)) | 685 | if (rpc_test_and_set_running(task)) |
686 | return; | 686 | return; |
687 | continue; | 687 | continue; |
688 | } | 688 | } |
689 | 689 | ||
690 | /* sync task: sleep here */ | 690 | /* sync task: sleep here */ |
691 | dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); | 691 | dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); |
692 | /* Note: Caller should be using rpc_clnt_sigmask() */ | 692 | /* Note: Caller should be using rpc_clnt_sigmask() */ |
693 | status = out_of_line_wait_on_bit(&task->tk_runstate, | 693 | status = out_of_line_wait_on_bit(&task->tk_runstate, |
694 | RPC_TASK_QUEUED, rpc_wait_bit_interruptible, | 694 | RPC_TASK_QUEUED, rpc_wait_bit_interruptible, |
695 | TASK_INTERRUPTIBLE); | 695 | TASK_INTERRUPTIBLE); |
696 | if (status == -ERESTARTSYS) { | 696 | if (status == -ERESTARTSYS) { |
697 | /* | 697 | /* |
698 | * When a sync task receives a signal, it exits with | 698 | * When a sync task receives a signal, it exits with |
699 | * -ERESTARTSYS. In order to catch any callbacks that | 699 | * -ERESTARTSYS. In order to catch any callbacks that |
700 | * clean up after sleeping on some queue, we don't | 700 | * clean up after sleeping on some queue, we don't |
701 | * break the loop here, but go around once more. | 701 | * break the loop here, but go around once more. |
702 | */ | 702 | */ |
703 | dprintk("RPC: %5u got signal\n", task->tk_pid); | 703 | dprintk("RPC: %5u got signal\n", task->tk_pid); |
704 | task->tk_flags |= RPC_TASK_KILLED; | 704 | task->tk_flags |= RPC_TASK_KILLED; |
705 | rpc_exit(task, -ERESTARTSYS); | 705 | rpc_exit(task, -ERESTARTSYS); |
706 | rpc_wake_up_task(task); | 706 | rpc_wake_up_task(task); |
707 | } | 707 | } |
708 | rpc_set_running(task); | 708 | rpc_set_running(task); |
709 | dprintk("RPC: %5u sync task resuming\n", task->tk_pid); | 709 | dprintk("RPC: %5u sync task resuming\n", task->tk_pid); |
710 | } | 710 | } |
711 | 711 | ||
712 | dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, | 712 | dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, |
713 | task->tk_status); | 713 | task->tk_status); |
714 | /* Release all resources associated with the task */ | 714 | /* Release all resources associated with the task */ |
715 | rpc_release_task(task); | 715 | rpc_release_task(task); |
716 | } | 716 | } |
717 | 717 | ||
718 | /* | 718 | /* |
719 | * User-visible entry point to the scheduler. | 719 | * User-visible entry point to the scheduler. |
720 | * | 720 | * |
721 | * This may be called recursively if e.g. an async NFS task updates | 721 | * This may be called recursively if e.g. an async NFS task updates |
722 | * the attributes and finds that dirty pages must be flushed. | 722 | * the attributes and finds that dirty pages must be flushed. |
723 | * NOTE: Upon exit of this function the task is guaranteed to be | 723 | * NOTE: Upon exit of this function the task is guaranteed to be |
724 | * released. In particular note that tk_release() will have | 724 | * released. In particular note that tk_release() will have |
725 | * been called, so your task memory may have been freed. | 725 | * been called, so your task memory may have been freed. |
726 | */ | 726 | */ |
727 | void rpc_execute(struct rpc_task *task) | 727 | void rpc_execute(struct rpc_task *task) |
728 | { | 728 | { |
729 | rpc_set_active(task); | 729 | rpc_set_active(task); |
730 | rpc_set_running(task); | 730 | rpc_set_running(task); |
731 | __rpc_execute(task); | 731 | __rpc_execute(task); |
732 | } | 732 | } |
733 | 733 | ||
734 | static void rpc_async_schedule(struct work_struct *work) | 734 | static void rpc_async_schedule(struct work_struct *work) |
735 | { | 735 | { |
736 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); | 736 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); |
737 | } | 737 | } |
738 | 738 | ||
739 | struct rpc_buffer { | 739 | struct rpc_buffer { |
740 | size_t len; | 740 | size_t len; |
741 | char data[]; | 741 | char data[]; |
742 | }; | 742 | }; |
743 | 743 | ||
744 | /** | 744 | /** |
745 | * rpc_malloc - allocate an RPC buffer | 745 | * rpc_malloc - allocate an RPC buffer |
746 | * @task: RPC task that will use this buffer | 746 | * @task: RPC task that will use this buffer |
747 | * @size: requested byte size | 747 | * @size: requested byte size |
748 | * | 748 | * |
749 | * To prevent rpciod from hanging, this allocator never sleeps, | 749 | * To prevent rpciod from hanging, this allocator never sleeps, |
750 | * returning NULL if the request cannot be serviced immediately. | 750 | * returning NULL if the request cannot be serviced immediately. |
751 | * The caller can arrange to sleep in a way that is safe for rpciod. | 751 | * The caller can arrange to sleep in a way that is safe for rpciod. |
752 | * | 752 | * |
753 | * Most requests are 'small' (under 2KiB) and can be serviced from a | 753 | * Most requests are 'small' (under 2KiB) and can be serviced from a |
754 | * mempool, ensuring that NFS reads and writes can always proceed, | 754 | * mempool, ensuring that NFS reads and writes can always proceed, |
755 | * and that there is good locality of reference for these buffers. | 755 | * and that there is good locality of reference for these buffers. |
756 | * | 756 | * |
757 | * In order to avoid memory starvation triggering more writebacks of | 757 | * In order to avoid memory starvation triggering more writebacks of |
758 | * NFS requests, we avoid using GFP_KERNEL. | 758 | * NFS requests, we avoid using GFP_KERNEL. |
759 | */ | 759 | */ |
760 | void *rpc_malloc(struct rpc_task *task, size_t size) | 760 | void *rpc_malloc(struct rpc_task *task, size_t size) |
761 | { | 761 | { |
762 | struct rpc_buffer *buf; | 762 | struct rpc_buffer *buf; |
763 | gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT; | 763 | gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT; |
764 | 764 | ||
765 | size += sizeof(struct rpc_buffer); | 765 | size += sizeof(struct rpc_buffer); |
766 | if (size <= RPC_BUFFER_MAXSIZE) | 766 | if (size <= RPC_BUFFER_MAXSIZE) |
767 | buf = mempool_alloc(rpc_buffer_mempool, gfp); | 767 | buf = mempool_alloc(rpc_buffer_mempool, gfp); |
768 | else | 768 | else |
769 | buf = kmalloc(size, gfp); | 769 | buf = kmalloc(size, gfp); |
770 | 770 | ||
771 | if (!buf) | 771 | if (!buf) |
772 | return NULL; | 772 | return NULL; |
773 | 773 | ||
774 | buf->len = size; | 774 | buf->len = size; |
775 | dprintk("RPC: %5u allocated buffer of size %zu at %p\n", | 775 | dprintk("RPC: %5u allocated buffer of size %zu at %p\n", |
776 | task->tk_pid, size, buf); | 776 | task->tk_pid, size, buf); |
777 | return &buf->data; | 777 | return &buf->data; |
778 | } | 778 | } |
779 | 779 | ||
780 | /** | 780 | /** |
781 | * rpc_free - free buffer allocated via rpc_malloc | 781 | * rpc_free - free buffer allocated via rpc_malloc |
782 | * @buffer: buffer to free | 782 | * @buffer: buffer to free |
783 | * | 783 | * |
784 | */ | 784 | */ |
785 | void rpc_free(void *buffer) | 785 | void rpc_free(void *buffer) |
786 | { | 786 | { |
787 | size_t size; | 787 | size_t size; |
788 | struct rpc_buffer *buf; | 788 | struct rpc_buffer *buf; |
789 | 789 | ||
790 | if (!buffer) | 790 | if (!buffer) |
791 | return; | 791 | return; |
792 | 792 | ||
793 | buf = container_of(buffer, struct rpc_buffer, data); | 793 | buf = container_of(buffer, struct rpc_buffer, data); |
794 | size = buf->len; | 794 | size = buf->len; |
795 | 795 | ||
796 | dprintk("RPC: freeing buffer of size %zu at %p\n", | 796 | dprintk("RPC: freeing buffer of size %zu at %p\n", |
797 | size, buf); | 797 | size, buf); |
798 | 798 | ||
799 | if (size <= RPC_BUFFER_MAXSIZE) | 799 | if (size <= RPC_BUFFER_MAXSIZE) |
800 | mempool_free(buf, rpc_buffer_mempool); | 800 | mempool_free(buf, rpc_buffer_mempool); |
801 | else | 801 | else |
802 | kfree(buf); | 802 | kfree(buf); |
803 | } | 803 | } |
804 | 804 | ||
805 | /* | 805 | /* |
806 | * Creation and deletion of RPC task structures | 806 | * Creation and deletion of RPC task structures |
807 | */ | 807 | */ |
808 | void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) | 808 | void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) |
809 | { | 809 | { |
810 | memset(task, 0, sizeof(*task)); | 810 | memset(task, 0, sizeof(*task)); |
811 | init_timer(&task->tk_timer); | 811 | init_timer(&task->tk_timer); |
812 | task->tk_timer.data = (unsigned long) task; | 812 | task->tk_timer.data = (unsigned long) task; |
813 | task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer; | 813 | task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer; |
814 | atomic_set(&task->tk_count, 1); | 814 | atomic_set(&task->tk_count, 1); |
815 | task->tk_client = clnt; | 815 | task->tk_client = clnt; |
816 | task->tk_flags = flags; | 816 | task->tk_flags = flags; |
817 | task->tk_ops = tk_ops; | 817 | task->tk_ops = tk_ops; |
818 | if (tk_ops->rpc_call_prepare != NULL) | 818 | if (tk_ops->rpc_call_prepare != NULL) |
819 | task->tk_action = rpc_prepare_task; | 819 | task->tk_action = rpc_prepare_task; |
820 | task->tk_calldata = calldata; | 820 | task->tk_calldata = calldata; |
821 | 821 | ||
822 | /* Initialize retry counters */ | 822 | /* Initialize retry counters */ |
823 | task->tk_garb_retry = 2; | 823 | task->tk_garb_retry = 2; |
824 | task->tk_cred_retry = 2; | 824 | task->tk_cred_retry = 2; |
825 | 825 | ||
826 | task->tk_priority = RPC_PRIORITY_NORMAL; | 826 | task->tk_priority = RPC_PRIORITY_NORMAL; |
827 | task->tk_cookie = (unsigned long)current; | 827 | task->tk_cookie = (unsigned long)current; |
828 | 828 | ||
829 | /* Initialize workqueue for async tasks */ | 829 | /* Initialize workqueue for async tasks */ |
830 | task->tk_workqueue = rpciod_workqueue; | 830 | task->tk_workqueue = rpciod_workqueue; |
831 | 831 | ||
832 | if (clnt) { | 832 | if (clnt) { |
833 | atomic_inc(&clnt->cl_users); | 833 | atomic_inc(&clnt->cl_users); |
834 | if (clnt->cl_softrtry) | 834 | if (clnt->cl_softrtry) |
835 | task->tk_flags |= RPC_TASK_SOFT; | 835 | task->tk_flags |= RPC_TASK_SOFT; |
836 | if (!clnt->cl_intr) | 836 | if (!clnt->cl_intr) |
837 | task->tk_flags |= RPC_TASK_NOINTR; | 837 | task->tk_flags |= RPC_TASK_NOINTR; |
838 | } | 838 | } |
839 | 839 | ||
840 | BUG_ON(task->tk_ops == NULL); | 840 | BUG_ON(task->tk_ops == NULL); |
841 | 841 | ||
842 | /* starting timestamp */ | 842 | /* starting timestamp */ |
843 | task->tk_start = jiffies; | 843 | task->tk_start = jiffies; |
844 | 844 | ||
845 | dprintk("RPC: new task initialized, procpid %u\n", | 845 | dprintk("RPC: new task initialized, procpid %u\n", |
846 | current->pid); | 846 | current->pid); |
847 | } | 847 | } |
848 | 848 | ||
849 | static struct rpc_task * | 849 | static struct rpc_task * |
850 | rpc_alloc_task(void) | 850 | rpc_alloc_task(void) |
851 | { | 851 | { |
852 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); | 852 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); |
853 | } | 853 | } |
854 | 854 | ||
855 | static void rpc_free_task(struct rcu_head *rcu) | 855 | static void rpc_free_task(struct rcu_head *rcu) |
856 | { | 856 | { |
857 | struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu); | 857 | struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu); |
858 | dprintk("RPC: %5u freeing task\n", task->tk_pid); | 858 | dprintk("RPC: %5u freeing task\n", task->tk_pid); |
859 | mempool_free(task, rpc_task_mempool); | 859 | mempool_free(task, rpc_task_mempool); |
860 | } | 860 | } |
861 | 861 | ||
862 | /* | 862 | /* |
863 | * Create a new task for the specified client. We have to | 863 | * Create a new task for the specified client. We have to |
864 | * clean up after an allocation failure, as the client may | 864 | * clean up after an allocation failure, as the client may |
865 | * have specified "oneshot". | 865 | * have specified "oneshot". |
866 | */ | 866 | */ |
867 | struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) | 867 | struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) |
868 | { | 868 | { |
869 | struct rpc_task *task; | 869 | struct rpc_task *task; |
870 | 870 | ||
871 | task = rpc_alloc_task(); | 871 | task = rpc_alloc_task(); |
872 | if (!task) | 872 | if (!task) |
873 | goto cleanup; | 873 | goto cleanup; |
874 | 874 | ||
875 | rpc_init_task(task, clnt, flags, tk_ops, calldata); | 875 | rpc_init_task(task, clnt, flags, tk_ops, calldata); |
876 | 876 | ||
877 | dprintk("RPC: allocated task %p\n", task); | 877 | dprintk("RPC: allocated task %p\n", task); |
878 | task->tk_flags |= RPC_TASK_DYNAMIC; | 878 | task->tk_flags |= RPC_TASK_DYNAMIC; |
879 | out: | 879 | out: |
880 | return task; | 880 | return task; |
881 | 881 | ||
882 | cleanup: | 882 | cleanup: |
883 | /* Check whether to release the client */ | 883 | /* Check whether to release the client */ |
884 | if (clnt) { | 884 | if (clnt) { |
885 | printk("rpc_new_task: failed, users=%d, oneshot=%d\n", | 885 | printk("rpc_new_task: failed, users=%d, oneshot=%d\n", |
886 | atomic_read(&clnt->cl_users), clnt->cl_oneshot); | 886 | atomic_read(&clnt->cl_users), clnt->cl_oneshot); |
887 | atomic_inc(&clnt->cl_users); /* pretend we were used ... */ | 887 | atomic_inc(&clnt->cl_users); /* pretend we were used ... */ |
888 | rpc_release_client(clnt); | 888 | rpc_release_client(clnt); |
889 | } | 889 | } |
890 | goto out; | 890 | goto out; |
891 | } | 891 | } |
892 | 892 | ||
893 | 893 | ||
894 | void rpc_put_task(struct rpc_task *task) | 894 | void rpc_put_task(struct rpc_task *task) |
895 | { | 895 | { |
896 | const struct rpc_call_ops *tk_ops = task->tk_ops; | 896 | const struct rpc_call_ops *tk_ops = task->tk_ops; |
897 | void *calldata = task->tk_calldata; | 897 | void *calldata = task->tk_calldata; |
898 | 898 | ||
899 | if (!atomic_dec_and_test(&task->tk_count)) | 899 | if (!atomic_dec_and_test(&task->tk_count)) |
900 | return; | 900 | return; |
901 | /* Release resources */ | 901 | /* Release resources */ |
902 | if (task->tk_rqstp) | 902 | if (task->tk_rqstp) |
903 | xprt_release(task); | 903 | xprt_release(task); |
904 | if (task->tk_msg.rpc_cred) | 904 | if (task->tk_msg.rpc_cred) |
905 | rpcauth_unbindcred(task); | 905 | rpcauth_unbindcred(task); |
906 | if (task->tk_client) { | 906 | if (task->tk_client) { |
907 | rpc_release_client(task->tk_client); | 907 | rpc_release_client(task->tk_client); |
908 | task->tk_client = NULL; | 908 | task->tk_client = NULL; |
909 | } | 909 | } |
910 | if (task->tk_flags & RPC_TASK_DYNAMIC) | 910 | if (task->tk_flags & RPC_TASK_DYNAMIC) |
911 | call_rcu_bh(&task->u.tk_rcu, rpc_free_task); | 911 | call_rcu_bh(&task->u.tk_rcu, rpc_free_task); |
912 | rpc_release_calldata(tk_ops, calldata); | 912 | rpc_release_calldata(tk_ops, calldata); |
913 | } | 913 | } |
914 | EXPORT_SYMBOL(rpc_put_task); | 914 | EXPORT_SYMBOL(rpc_put_task); |
915 | 915 | ||
916 | static void rpc_release_task(struct rpc_task *task) | 916 | static void rpc_release_task(struct rpc_task *task) |
917 | { | 917 | { |
918 | #ifdef RPC_DEBUG | 918 | #ifdef RPC_DEBUG |
919 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | 919 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); |
920 | #endif | 920 | #endif |
921 | dprintk("RPC: %5u release task\n", task->tk_pid); | 921 | dprintk("RPC: %5u release task\n", task->tk_pid); |
922 | 922 | ||
923 | /* Remove from global task list */ | 923 | /* Remove from global task list */ |
924 | spin_lock(&rpc_sched_lock); | 924 | spin_lock(&rpc_sched_lock); |
925 | list_del(&task->tk_task); | 925 | list_del(&task->tk_task); |
926 | spin_unlock(&rpc_sched_lock); | 926 | spin_unlock(&rpc_sched_lock); |
927 | 927 | ||
928 | BUG_ON (RPC_IS_QUEUED(task)); | 928 | BUG_ON (RPC_IS_QUEUED(task)); |
929 | 929 | ||
930 | /* Synchronously delete any running timer */ | 930 | /* Synchronously delete any running timer */ |
931 | rpc_delete_timer(task); | 931 | rpc_delete_timer(task); |
932 | 932 | ||
933 | #ifdef RPC_DEBUG | 933 | #ifdef RPC_DEBUG |
934 | task->tk_magic = 0; | 934 | task->tk_magic = 0; |
935 | #endif | 935 | #endif |
936 | /* Wake up anyone who is waiting for task completion */ | 936 | /* Wake up anyone who is waiting for task completion */ |
937 | rpc_mark_complete_task(task); | 937 | rpc_mark_complete_task(task); |
938 | 938 | ||
939 | rpc_put_task(task); | 939 | rpc_put_task(task); |
940 | } | 940 | } |
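
rpc_mark_complete_task() above is what wakes a caller that kept its own reference to the task and is sleeping until the call finishes. A small sketch of that waiter side, assuming the sched.h helper rpc_wait_for_completion_task() (name recalled from memory):

	/* Sketch: wait for an async task we hold a reference on, then drop it. */
	static int example_wait_for_task(struct rpc_task *task)
	{
		int ret;

		ret = rpc_wait_for_completion_task(task);	/* sleeps until rpc_mark_complete_task() */
		if (ret == 0)
			ret = task->tk_status;			/* result of the completed call */
		rpc_put_task(task);				/* release the caller's reference */
		return ret;
	}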
941 | 941 | ||
942 | /** | 942 | /** |
943 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | 943 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it |
944 | * @clnt: pointer to RPC client | 944 | * @clnt: pointer to RPC client |
945 | * @flags: RPC flags | 945 | * @flags: RPC flags |
946 | * @ops: RPC call ops | 946 | * @ops: RPC call ops |
947 | * @data: user call data | 947 | * @data: user call data |
948 | */ | 948 | */ |
949 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | 949 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, |
950 | const struct rpc_call_ops *ops, | 950 | const struct rpc_call_ops *ops, |
951 | void *data) | 951 | void *data) |
952 | { | 952 | { |
953 | struct rpc_task *task; | 953 | struct rpc_task *task; |
954 | task = rpc_new_task(clnt, flags, ops, data); | 954 | task = rpc_new_task(clnt, flags, ops, data); |
955 | if (task == NULL) { | 955 | if (task == NULL) { |
956 | rpc_release_calldata(ops, data); | 956 | rpc_release_calldata(ops, data); |
957 | return ERR_PTR(-ENOMEM); | 957 | return ERR_PTR(-ENOMEM); |
958 | } | 958 | } |
959 | atomic_inc(&task->tk_count); | 959 | atomic_inc(&task->tk_count); |
960 | rpc_execute(task); | 960 | rpc_execute(task); |
961 | return task; | 961 | return task; |
962 | } | 962 | } |
963 | EXPORT_SYMBOL(rpc_run_task); | 963 | EXPORT_SYMBOL(rpc_run_task); |
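
For reference, a sketch of an asynchronous caller of rpc_run_task(). It supplies rpc_call_ops callbacks and must drop the reference that rpc_run_task() returns; in a real caller the RPC message would be bound to the task in an ->rpc_call_prepare callback or via rpc_call_setup(), which is omitted here. The calldata structure and function names are hypothetical:

	/* Hypothetical per-call state handed to the callbacks as "calldata". */
	struct example_calldata {
		int cookie;
	};

	static void example_call_done(struct rpc_task *task, void *calldata)
	{
		struct example_calldata *d = calldata;
		dprintk("RPC: %5u example call finished, status %d (cookie %d)\n",
				task->tk_pid, task->tk_status, d->cookie);
	}

	static void example_release(void *calldata)
	{
		kfree(calldata);	/* invoked via rpc_release_calldata() when the task goes away */
	}

	static const struct rpc_call_ops example_call_ops = {
		.rpc_call_done	= example_call_done,
		.rpc_release	= example_release,
	};

	static int example_async_call(struct rpc_clnt *clnt)
	{
		struct example_calldata *d;
		struct rpc_task *task;

		d = kzalloc(sizeof(*d), GFP_NOFS);
		if (d == NULL)
			return -ENOMEM;
		task = rpc_run_task(clnt, RPC_TASK_ASYNC, &example_call_ops, d);
		if (IS_ERR(task))
			return PTR_ERR(task);	/* rpc_run_task() already freed d via ->rpc_release */
		rpc_put_task(task);		/* we do not track the task, so drop the returned reference */
		return 0;
	}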
964 | 964 | ||
965 | /* | 965 | /* |
966 | * Kill all tasks for the given client. | 966 | * Kill all tasks for the given client. |
967 | * XXX: kill their descendants as well? | 967 | * XXX: kill their descendants as well? |
968 | */ | 968 | */ |
969 | void rpc_killall_tasks(struct rpc_clnt *clnt) | 969 | void rpc_killall_tasks(struct rpc_clnt *clnt) |
970 | { | 970 | { |
971 | struct rpc_task *rovr; | 971 | struct rpc_task *rovr; |
972 | struct list_head *le; | 972 | struct list_head *le; |
973 | 973 | ||
974 | dprintk("RPC: killing all tasks for client %p\n", clnt); | 974 | dprintk("RPC: killing all tasks for client %p\n", clnt); |
975 | 975 | ||
976 | /* | 976 | /* |
977 | * Spin lock all_tasks to prevent changes... | 977 | * Spin lock all_tasks to prevent changes... |
978 | */ | 978 | */ |
979 | spin_lock(&rpc_sched_lock); | 979 | spin_lock(&rpc_sched_lock); |
980 | alltask_for_each(rovr, le, &all_tasks) { | 980 | alltask_for_each(rovr, le, &all_tasks) { |
981 | if (! RPC_IS_ACTIVATED(rovr)) | 981 | if (! RPC_IS_ACTIVATED(rovr)) |
982 | continue; | 982 | continue; |
983 | if (!clnt || rovr->tk_client == clnt) { | 983 | if (!clnt || rovr->tk_client == clnt) { |
984 | rovr->tk_flags |= RPC_TASK_KILLED; | 984 | rovr->tk_flags |= RPC_TASK_KILLED; |
985 | rpc_exit(rovr, -EIO); | 985 | rpc_exit(rovr, -EIO); |
986 | rpc_wake_up_task(rovr); | 986 | rpc_wake_up_task(rovr); |
987 | } | 987 | } |
988 | } | 988 | } |
989 | spin_unlock(&rpc_sched_lock); | 989 | spin_unlock(&rpc_sched_lock); |
990 | } | 990 | } |
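
For reference, client teardown is the typical caller of rpc_killall_tasks(). A sketch in the spirit of rpc_shutdown_client() in clnt.c (recalled from memory; the real function also clears the client's oneshot/dead flags), with a hypothetical wait queue standing in for the one signalled when a task releases the client:

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_destroy_wait);	/* woken when a task drops the client */

	/* Sketch: keep killing outstanding tasks until every user of the client is gone. */
	static void example_shutdown(struct rpc_clnt *clnt)
	{
		while (atomic_read(&clnt->cl_users) > 0) {
			rpc_killall_tasks(clnt);	/* flag tasks RPC_TASK_KILLED and wake them */
			wait_event_timeout(example_destroy_wait,
					   !atomic_read(&clnt->cl_users), 1 * HZ);
		}
	}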
991 | 991 | ||
992 | static DECLARE_MUTEX_LOCKED(rpciod_running); | ||
993 | |||
994 | static void rpciod_killall(void) | 992 | static void rpciod_killall(void) |
995 | { | 993 | { |
996 | unsigned long flags; | 994 | unsigned long flags; |
997 | 995 | ||
998 | while (!list_empty(&all_tasks)) { | 996 | while (!list_empty(&all_tasks)) { |
999 | clear_thread_flag(TIF_SIGPENDING); | 997 | clear_thread_flag(TIF_SIGPENDING); |
1000 | rpc_killall_tasks(NULL); | 998 | rpc_killall_tasks(NULL); |
1001 | flush_workqueue(rpciod_workqueue); | 999 | flush_workqueue(rpciod_workqueue); |
1002 | if (!list_empty(&all_tasks)) { | 1000 | if (!list_empty(&all_tasks)) { |
1003 | dprintk("RPC: rpciod_killall: waiting for tasks " | 1001 | dprintk("RPC: rpciod_killall: waiting for tasks " |
1004 | "to exit\n"); | 1002 | "to exit\n"); |
1005 | yield(); | 1003 | yield(); |
1006 | } | 1004 | } |
1007 | } | 1005 | } |
1008 | 1006 | ||
1009 | spin_lock_irqsave(&current->sighand->siglock, flags); | 1007 | spin_lock_irqsave(&current->sighand->siglock, flags); |
1009 | spin_lock_irqsave(&current->sighand->siglock, flags); | 1007 | spin_lock_irqsave(&current->sighand->siglock, flags); |
1010 | recalc_sigpending(); | 1008 | recalc_sigpending(); |
1011 | spin_unlock_irqrestore(&current->sighand->siglock, flags); | 1009 | spin_unlock_irqrestore(&current->sighand->siglock, flags); |
1012 | } | 1010 | } |
1013 | 1011 | ||
1014 | /* | 1012 | /* |
1015 | * Start up the rpciod process if it's not already running. | 1013 | * Start up the rpciod process if it's not already running. |
1016 | */ | 1014 | */ |
1017 | int | 1015 | int |
1018 | rpciod_up(void) | 1016 | rpciod_up(void) |
1019 | { | 1017 | { |
1020 | struct workqueue_struct *wq; | 1018 | struct workqueue_struct *wq; |
1021 | int error = 0; | 1019 | int error = 0; |
1022 | 1020 | ||
1023 | mutex_lock(&rpciod_mutex); | 1021 | mutex_lock(&rpciod_mutex); |
1024 | dprintk("RPC: rpciod_up: users %u\n", rpciod_users); | 1022 | dprintk("RPC: rpciod_up: users %u\n", rpciod_users); |
1025 | rpciod_users++; | 1023 | rpciod_users++; |
1026 | if (rpciod_workqueue) | 1024 | if (rpciod_workqueue) |
1027 | goto out; | 1025 | goto out; |
1028 | /* | 1026 | /* |
1029 | * If there's no pid, we should be the first user. | 1027 | * If there's no pid, we should be the first user. |
1030 | */ | 1028 | */ |
1031 | if (rpciod_users > 1) | 1029 | if (rpciod_users > 1) |
1032 | printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users); | 1030 | printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users); |
1033 | /* | 1031 | /* |
1034 | * Create the rpciod thread and wait for it to start. | 1032 | * Create the rpciod thread and wait for it to start. |
1035 | */ | 1033 | */ |
1036 | error = -ENOMEM; | 1034 | error = -ENOMEM; |
1037 | wq = create_workqueue("rpciod"); | 1035 | wq = create_workqueue("rpciod"); |
1038 | if (wq == NULL) { | 1036 | if (wq == NULL) { |
1039 | printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error); | 1037 | printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error); |
1040 | rpciod_users--; | 1038 | rpciod_users--; |
1041 | goto out; | 1039 | goto out; |
1042 | } | 1040 | } |
1043 | rpciod_workqueue = wq; | 1041 | rpciod_workqueue = wq; |
1044 | error = 0; | 1042 | error = 0; |
1045 | out: | 1043 | out: |
1046 | mutex_unlock(&rpciod_mutex); | 1044 | mutex_unlock(&rpciod_mutex); |
1047 | return error; | 1045 | return error; |
1048 | } | 1046 | } |
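
rpciod_up() and rpciod_down() form a reference-counted bring-up/tear-down pair serialized by rpciod_mutex: the "rpciod" workqueue is created on the first reference and destroyed when the last one is dropped. A minimal sketch of the expected pairing, with hypothetical caller names:

	/* Sketch: hold an rpciod reference for the lifetime of a hypothetical object. */
	static int example_create(void)
	{
		int error;

		error = rpciod_up();		/* starts the "rpciod" workqueue on first use */
		if (error)
			return error;

		/* ... set up the object that will submit async RPC tasks ... */

		return 0;
	}

	static void example_destroy(void)
	{
		/* ... tear down the object ... */

		rpciod_down();			/* destroys the workqueue once the last user is gone */
	}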
1049 | 1047 | ||
1050 | void | 1048 | void |
1051 | rpciod_down(void) | 1049 | rpciod_down(void) |
1052 | { | 1050 | { |
1053 | mutex_lock(&rpciod_mutex); | 1051 | mutex_lock(&rpciod_mutex); |
1054 | dprintk("RPC: rpciod_down sema %u\n", rpciod_users); | 1052 | dprintk("RPC: rpciod_down sema %u\n", rpciod_users); |
1055 | if (rpciod_users) { | 1053 | if (rpciod_users) { |
1056 | if (--rpciod_users) | 1054 | if (--rpciod_users) |
1057 | goto out; | 1055 | goto out; |
1058 | } else | 1056 | } else |
1059 | printk(KERN_WARNING "rpciod_down: no users??\n"); | 1057 | printk(KERN_WARNING "rpciod_down: no users??\n"); |
1060 | 1058 | ||
1061 | if (!rpciod_workqueue) { | 1059 | if (!rpciod_workqueue) { |
1062 | dprintk("RPC: rpciod_down: Nothing to do!\n"); | 1060 | dprintk("RPC: rpciod_down: Nothing to do!\n"); |
1063 | goto out; | 1061 | goto out; |
1064 | } | 1062 | } |
1065 | rpciod_killall(); | 1063 | rpciod_killall(); |
1066 | 1064 | ||
1067 | destroy_workqueue(rpciod_workqueue); | 1065 | destroy_workqueue(rpciod_workqueue); |
1068 | rpciod_workqueue = NULL; | 1066 | rpciod_workqueue = NULL; |
1069 | out: | 1067 | out: |
1070 | mutex_unlock(&rpciod_mutex); | 1068 | mutex_unlock(&rpciod_mutex); |
1071 | } | 1069 | } |
1072 | 1070 | ||
1073 | #ifdef RPC_DEBUG | 1071 | #ifdef RPC_DEBUG |
1074 | void rpc_show_tasks(void) | 1072 | void rpc_show_tasks(void) |
1075 | { | 1073 | { |
1076 | struct list_head *le; | 1074 | struct list_head *le; |
1077 | struct rpc_task *t; | 1075 | struct rpc_task *t; |
1078 | 1076 | ||
1079 | spin_lock(&rpc_sched_lock); | 1077 | spin_lock(&rpc_sched_lock); |
1080 | if (list_empty(&all_tasks)) { | 1078 | if (list_empty(&all_tasks)) { |
1081 | spin_unlock(&rpc_sched_lock); | 1079 | spin_unlock(&rpc_sched_lock); |
1082 | return; | 1080 | return; |
1083 | } | 1081 | } |
1084 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " | 1082 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " |
1085 | "-rpcwait -action- ---ops--\n"); | 1083 | "-rpcwait -action- ---ops--\n"); |
1086 | alltask_for_each(t, le, &all_tasks) { | 1084 | alltask_for_each(t, le, &all_tasks) { |
1087 | const char *rpc_waitq = "none"; | 1085 | const char *rpc_waitq = "none"; |
1088 | 1086 | ||
1089 | if (RPC_IS_QUEUED(t)) | 1087 | if (RPC_IS_QUEUED(t)) |
1090 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); | 1088 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); |
1091 | 1089 | ||
1092 | printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", | 1090 | printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", |
1093 | t->tk_pid, | 1091 | t->tk_pid, |
1094 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), | 1092 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), |
1095 | t->tk_flags, t->tk_status, | 1093 | t->tk_flags, t->tk_status, |
1096 | t->tk_client, | 1094 | t->tk_client, |
1097 | (t->tk_client ? t->tk_client->cl_prog : 0), | 1095 | (t->tk_client ? t->tk_client->cl_prog : 0), |
1098 | t->tk_rqstp, t->tk_timeout, | 1096 | t->tk_rqstp, t->tk_timeout, |
1099 | rpc_waitq, | 1097 | rpc_waitq, |
1100 | t->tk_action, t->tk_ops); | 1098 | t->tk_action, t->tk_ops); |
1101 | } | 1099 | } |
1102 | spin_unlock(&rpc_sched_lock); | 1100 | spin_unlock(&rpc_sched_lock); |
1103 | } | 1101 | } |
1104 | #endif | 1102 | #endif |
1105 | 1103 | ||
1106 | void | 1104 | void |
1107 | rpc_destroy_mempool(void) | 1105 | rpc_destroy_mempool(void) |
1108 | { | 1106 | { |
1109 | if (rpc_buffer_mempool) | 1107 | if (rpc_buffer_mempool) |
1110 | mempool_destroy(rpc_buffer_mempool); | 1108 | mempool_destroy(rpc_buffer_mempool); |
1111 | if (rpc_task_mempool) | 1109 | if (rpc_task_mempool) |
1112 | mempool_destroy(rpc_task_mempool); | 1110 | mempool_destroy(rpc_task_mempool); |
1113 | if (rpc_task_slabp) | 1111 | if (rpc_task_slabp) |
1114 | kmem_cache_destroy(rpc_task_slabp); | 1112 | kmem_cache_destroy(rpc_task_slabp); |
1115 | if (rpc_buffer_slabp) | 1113 | if (rpc_buffer_slabp) |
1116 | kmem_cache_destroy(rpc_buffer_slabp); | 1114 | kmem_cache_destroy(rpc_buffer_slabp); |
1117 | } | 1115 | } |
1118 | 1116 | ||
1119 | int | 1117 | int |
1120 | rpc_init_mempool(void) | 1118 | rpc_init_mempool(void) |
1121 | { | 1119 | { |
1122 | rpc_task_slabp = kmem_cache_create("rpc_tasks", | 1120 | rpc_task_slabp = kmem_cache_create("rpc_tasks", |
1123 | sizeof(struct rpc_task), | 1121 | sizeof(struct rpc_task), |
1124 | 0, SLAB_HWCACHE_ALIGN, | 1122 | 0, SLAB_HWCACHE_ALIGN, |
1125 | NULL, NULL); | 1123 | NULL, NULL); |
1126 | if (!rpc_task_slabp) | 1124 | if (!rpc_task_slabp) |
1127 | goto err_nomem; | 1125 | goto err_nomem; |
1128 | rpc_buffer_slabp = kmem_cache_create("rpc_buffers", | 1126 | rpc_buffer_slabp = kmem_cache_create("rpc_buffers", |
1129 | RPC_BUFFER_MAXSIZE, | 1127 | RPC_BUFFER_MAXSIZE, |
1130 | 0, SLAB_HWCACHE_ALIGN, | 1128 | 0, SLAB_HWCACHE_ALIGN, |
1131 | NULL, NULL); | 1129 | NULL, NULL); |
1132 | if (!rpc_buffer_slabp) | 1130 | if (!rpc_buffer_slabp) |
1133 | goto err_nomem; | 1131 | goto err_nomem; |
1134 | rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, | 1132 | rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, |
1135 | rpc_task_slabp); | 1133 | rpc_task_slabp); |
1136 | if (!rpc_task_mempool) | 1134 | if (!rpc_task_mempool) |
1137 | goto err_nomem; | 1135 | goto err_nomem; |
1138 | rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, | 1136 | rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, |
1139 | rpc_buffer_slabp); | 1137 | rpc_buffer_slabp); |
1140 | if (!rpc_buffer_mempool) | 1138 | if (!rpc_buffer_mempool) |
1141 | goto err_nomem; | 1139 | goto err_nomem; |
1142 | return 0; | 1140 | return 0; |
1143 | err_nomem: | 1141 | err_nomem: |
1144 | rpc_destroy_mempool(); | 1142 | rpc_destroy_mempool(); |
1145 | return -ENOMEM; | 1143 | return -ENOMEM; |
1146 | } | 1144 | } |
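
rpc_init_mempool() and rpc_destroy_mempool() are the module-wide slab/mempool bring-up and tear-down; the SUNRPC module init and exit paths (init_sunrpc()/cleanup_sunrpc() in sunrpc_syms.c, from memory) are expected to bracket everything else with them. A sketch, assuming that pairing:

	#include <linux/init.h>

	/* Sketch: how module init/exit code would bracket the RPC mempools. */
	static int __init example_init(void)
	{
		int err;

		err = rpc_init_mempool();	/* creates the rpc_tasks/rpc_buffers slabs and mempools */
		if (err)
			return err;

		/* ... register the rest of the RPC subsystem ... */

		return 0;
	}

	static void __exit example_exit(void)
	{
		/* ... unregister everything that may still allocate tasks or buffers ... */

		rpc_destroy_mempool();		/* safe to call even on partial init: each pointer is checked */
	}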
1147 | 1145 |