Commit 3b27bad7f7ceacca6d6c0ef647ffb38aa55a8336
1 parent
d9ba131d8f
Exists in
master
and in
4 other branches
SUNRPC: Allow caller of rpc_sleep_on() to select priority levels
Currently, the caller has to change the value of task->tk_priority if it wants to select the priority level on which the task will sleep. This patch allows the caller to select a priority level at sleep time, rather than always using task->tk_priority. Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Showing 2 changed files with 33 additions and 9 deletions Side-by-side Diff
include/linux/sunrpc/sched.h
... | ... | @@ -227,6 +227,10 @@ |
227 | 227 | void rpc_destroy_wait_queue(struct rpc_wait_queue *); |
228 | 228 | void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, |
229 | 229 | rpc_action action); |
230 | +void rpc_sleep_on_priority(struct rpc_wait_queue *, | |
231 | + struct rpc_task *, | |
232 | + rpc_action action, | |
233 | + int priority); | |
230 | 234 | void rpc_wake_up_queued_task(struct rpc_wait_queue *, |
231 | 235 | struct rpc_task *); |
232 | 236 | void rpc_wake_up(struct rpc_wait_queue *); |
net/sunrpc/sched.c
... | ... | @@ -97,14 +97,16 @@ |
97 | 97 | /* |
98 | 98 | * Add new request to a priority queue. |
99 | 99 | */ |
100 | -static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task) | |
100 | +static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, | |
101 | + struct rpc_task *task, | |
102 | + unsigned char queue_priority) | |
101 | 103 | { |
102 | 104 | struct list_head *q; |
103 | 105 | struct rpc_task *t; |
104 | 106 | |
105 | 107 | INIT_LIST_HEAD(&task->u.tk_wait.links); |
106 | - q = &queue->tasks[task->tk_priority]; | |
107 | - if (unlikely(task->tk_priority > queue->maxpriority)) | |
108 | + q = &queue->tasks[queue_priority]; | |
109 | + if (unlikely(queue_priority > queue->maxpriority)) | |
108 | 110 | q = &queue->tasks[queue->maxpriority]; |
109 | 111 | list_for_each_entry(t, q, u.tk_wait.list) { |
110 | 112 | if (t->tk_owner == task->tk_owner) { |
111 | 113 | |
... | ... | @@ -123,12 +125,14 @@ |
123 | 125 | * improve overall performance. |
124 | 126 | * Everyone else gets appended to the queue to ensure proper FIFO behavior. |
125 | 127 | */ |
126 | -static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) | |
128 | +static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, | |
129 | + struct rpc_task *task, | |
130 | + unsigned char queue_priority) | |
127 | 131 | { |
128 | 132 | BUG_ON (RPC_IS_QUEUED(task)); |
129 | 133 | |
130 | 134 | if (RPC_IS_PRIORITY(queue)) |
131 | - __rpc_add_wait_queue_priority(queue, task); | |
135 | + __rpc_add_wait_queue_priority(queue, task, queue_priority); | |
132 | 136 | else if (RPC_IS_SWAPPER(task)) |
133 | 137 | list_add(&task->u.tk_wait.list, &queue->tasks[0]); |
134 | 138 | else |
135 | 139 | |
... | ... | @@ -311,13 +315,15 @@ |
311 | 315 | * NB: An RPC task will only receive interrupt-driven events as long |
312 | 316 | * as it's on a wait queue. |
313 | 317 | */ |
314 | -static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |
315 | - rpc_action action) | |
318 | +static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, | |
319 | + struct rpc_task *task, | |
320 | + rpc_action action, | |
321 | + unsigned char queue_priority) | |
316 | 322 | { |
317 | 323 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", |
318 | 324 | task->tk_pid, rpc_qname(q), jiffies); |
319 | 325 | |
320 | - __rpc_add_wait_queue(q, task); | |
326 | + __rpc_add_wait_queue(q, task, queue_priority); | |
321 | 327 | |
322 | 328 | BUG_ON(task->tk_callback != NULL); |
323 | 329 | task->tk_callback = action; |
324 | 330 | |
... | ... | @@ -334,10 +340,24 @@ |
334 | 340 | * Protect the queue operations. |
335 | 341 | */ |
336 | 342 | spin_lock_bh(&q->lock); |
337 | - __rpc_sleep_on(q, task, action); | |
343 | + __rpc_sleep_on_priority(q, task, action, task->tk_priority); | |
338 | 344 | spin_unlock_bh(&q->lock); |
339 | 345 | } |
340 | 346 | EXPORT_SYMBOL_GPL(rpc_sleep_on); |
347 | + | |
348 | +void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, | |
349 | + rpc_action action, int priority) | |
350 | +{ | |
351 | + /* We shouldn't ever put an inactive task to sleep */ | |
352 | + BUG_ON(!RPC_IS_ACTIVATED(task)); | |
353 | + | |
354 | + /* | |
355 | + * Protect the queue operations. | |
356 | + */ | |
357 | + spin_lock_bh(&q->lock); | |
358 | + __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW); | |
359 | + spin_unlock_bh(&q->lock); | |
360 | +} | |
341 | 361 | |
342 | 362 | /** |
343 | 363 | * __rpc_do_wake_up_task - wake up a single rpc_task |