Commit 4f878e8475a465ddbd951e06a23317303f1b5b30
1 parent 4e3f9c5042
Exists in master and in 7 other branches
Btrfs: reduce worker thread spin_lock_irq hold times
This changes the btrfs worker threads to batch work items into a local list. It allows us to pull work items in large chunks and significantly reduces the number of times we need to take the worker thread spinlock.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
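To illustrate the idea outside the kernel tree, here is a minimal userspace sketch of the same batching pattern: the consumer takes the lock once to steal the entire pending list and then runs every stolen item without touching the lock again, instead of locking and unlocking around each item. The names work_item, worker_ctx and fetch_batch are invented for this example and do not appear in the commit; the real code uses struct btrfs_work, spin_lock_irq() and list_splice_tail_init() onto local prio_head/head lists, and it keeps FIFO order plus a separate priority list, which this simplified version does not.

/*
 * Hypothetical userspace analogue of the batching in this commit.
 * struct work_item, struct worker_ctx and fetch_batch() are invented
 * for the example; they are not part of the btrfs code.  Ordering is
 * simplified to a LIFO stack here.
 */
#include <pthread.h>
#include <stdio.h>

struct work_item {
	struct work_item *next;
	void (*func)(struct work_item *);
};

struct worker_ctx {
	pthread_mutex_t lock;
	struct work_item *pending;	/* shared queue, guarded by lock */
};

/* One lock acquisition steals everything currently queued. */
static struct work_item *fetch_batch(struct worker_ctx *w)
{
	struct work_item *batch;

	pthread_mutex_lock(&w->lock);
	batch = w->pending;
	w->pending = NULL;
	pthread_mutex_unlock(&w->lock);
	return batch;
}

static void drain(struct worker_ctx *w)
{
	struct work_item *batch;

	while ((batch = fetch_batch(w)) != NULL) {
		/* Run the whole batch without holding the lock. */
		while (batch != NULL) {
			struct work_item *work = batch;

			batch = batch->next;
			work->func(work);
		}
	}
}

static void say_done(struct work_item *work)
{
	(void)work;
	printf("work item ran\n");
}

int main(void)
{
	struct worker_ctx w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pending = NULL,
	};
	struct work_item items[3];

	pthread_mutex_lock(&w.lock);
	for (int i = 0; i < 3; i++) {
		items[i].func = say_done;
		items[i].next = w.pending;	/* producer pushes under the lock */
		w.pending = &items[i];
	}
	pthread_mutex_unlock(&w.lock);

	drain(&w);
	return 0;
}

Built with cc -pthread, this prints one line per queued item; the point is only that the number of lock acquisitions scales with the number of batches rather than with the number of work items.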
Showing 1 changed file with 60 additions and 14 deletions
fs/btrfs/async-thread.c
@@ -196,31 +196,73 @@
 	return freeit;
 }
 
+static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
+					struct list_head *prio_head,
+					struct list_head *head)
+{
+	struct btrfs_work *work = NULL;
+	struct list_head *cur = NULL;
+
+	if(!list_empty(prio_head))
+		cur = prio_head->next;
+
+	smp_mb();
+	if (!list_empty(&worker->prio_pending))
+		goto refill;
+
+	if (!list_empty(head))
+		cur = head->next;
+
+	if (cur)
+		goto out;
+
+refill:
+	spin_lock_irq(&worker->lock);
+	list_splice_tail_init(&worker->prio_pending, prio_head);
+	list_splice_tail_init(&worker->pending, head);
+
+	if (!list_empty(prio_head))
+		cur = prio_head->next;
+	else if (!list_empty(head))
+		cur = head->next;
+	spin_unlock_irq(&worker->lock);
+
+	if (!cur)
+		goto out_fail;
+
+out:
+	work = list_entry(cur, struct btrfs_work, list);
+
+out_fail:
+	return work;
+}
+
 /*
  * main loop for servicing work items
  */
 static int worker_loop(void *arg)
 {
 	struct btrfs_worker_thread *worker = arg;
-	struct list_head *cur;
+	struct list_head head;
+	struct list_head prio_head;
 	struct btrfs_work *work;
+
+	INIT_LIST_HEAD(&head);
+	INIT_LIST_HEAD(&prio_head);
+
 	do {
-		spin_lock_irq(&worker->lock);
-again_locked:
+again:
 		while (1) {
-			if (!list_empty(&worker->prio_pending))
-				cur = worker->prio_pending.next;
-			else if (!list_empty(&worker->pending))
-				cur = worker->pending.next;
-			else
+
+
+			work = get_next_work(worker, &prio_head, &head);
+			if (!work)
 				break;
 
-			work = list_entry(cur, struct btrfs_work, list);
 			list_del(&work->list);
 			clear_bit(WORK_QUEUED_BIT, &work->flags);
 
 			work->worker = worker;
-			spin_unlock_irq(&worker->lock);
 
 			work->func(work);
 
 
@@ -233,9 +275,11 @@
 
 			check_pending_worker_creates(worker);
 
-			spin_lock_irq(&worker->lock);
-			check_idle_worker(worker);
 		}
+
+		spin_lock_irq(&worker->lock);
+		check_idle_worker(worker);
+
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
@@ -274,8 +318,10 @@
 		spin_lock_irq(&worker->lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!list_empty(&worker->pending) ||
-		    !list_empty(&worker->prio_pending))
-			goto again_locked;
+		    !list_empty(&worker->prio_pending)) {
+			spin_unlock_irq(&worker->lock);
+			goto again;
+		}
 
 		/*
 		 * this makes sure we get a wakeup when someone