/* fs/btrfs/async-thread.c */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
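
/*
 * per work item flag bits in btrfs_work->flags: set when the item is
 * queued, when its main func has run, when its ordered hooks have run,
 * and when it should go on the high priority lists
 */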
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

static int __btrfs_start_workers(struct btrfs_workers *workers);

/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue.
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};

static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;
	start = container_of(work, struct worker_start, work);
	__btrfs_start_workers(start->queue);
	kfree(start);
}

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

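/*
 * a worker that could not create a new thread inline sets
 * atomic_start_pending; this runs in the worker after each work item
 * and hands the deferred thread creation to the single threaded helper
 * queue, where kthread_run can safely block.
 */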
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}

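/*
 * run the ordered_func/ordered_free hooks for completed work items,
 * strictly in queue order.  Items whose main func finished out of
 * order stay on the list as barriers until everything queued before
 * them is done.
 */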
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}

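/*
 * put_worker drops a reference on a worker thread and frees the struct
 * on the last put.  try_worker_shutdown lets a long idle worker retire
 * itself: it only succeeds when the pool keeps at least one other
 * thread and this worker has nothing pending.
 */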
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

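/*
 * grab the next work item for a worker, favoring the high priority
 * queue.  Batches of pending work are spliced onto the caller's
 * private prio_head/head lists under the worker lock, so the common
 * case can consume items without taking the lock at all.
 */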
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {

			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
			cond_resched();
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);
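
		/*
		 * out of work: decide how to sleep.  worker->working gates
		 * wake_up_process() in the queueing paths, so it must be
		 * cleared under worker->lock before the thread really sleeps.
		 */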
		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			try_to_freeze();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

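				/*
				 * nothing arrived during the short spin;
				 * sleep for up to 120s.  A worker that is
				 * still idle when the timeout expires tries
				 * to retire itself via try_worker_shutdown().
				 */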
				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	worker->task = kthread_run(worker_loop, worker,
				   "btrfs-%s-%d", workers->name,
				   workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		kfree(worker);
		goto fail;
	}

	spin_lock_irq(&workers->lock);
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	return 0;
fail:
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}

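/*
 * Illustrative setup sequence (sketch only; real callers live in
 * disk-io.c and size the pools from mount options, and max_active here
 * is just a placeholder for the caller's thread budget).  The single
 * threaded helper pool is created first and passed to the others so
 * they can defer blocking thread creation to it:
 *
 *	struct btrfs_workers helper, workers;
 *
 *	btrfs_init_workers(&helper, "genwork", 1, NULL);
 *	btrfs_init_workers(&workers, "worker", max_active, &helper);
 *	btrfs_start_workers(&helper);
 *	btrfs_start_workers(&workers);
 */
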
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

	spin_lock_irqsave(&workers->lock, flags);
again:
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:

	return 0;
}

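/*
 * Illustrative long running work function built on btrfs_requeue_work
 * (sketch only; my_work_func, make_some_progress, and finish_and_free
 * are hypothetical):
 *
 *	static void my_work_func(struct btrfs_work *work)
 *	{
 *		if (make_some_progress(work)) {
 *			btrfs_requeue_work(work);
 *			return;
 *		}
 *		finish_and_free(work);
 *	}
 */
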
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

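/*
 * Note: the high priority bit must be set before the work is queued,
 * since btrfs_queue_worker and btrfs_requeue_work test it when they
 * pick a list.  A minimal sketch (async is a hypothetical caller
 * structure holding an embedded btrfs_work):
 *
 *	btrfs_set_work_high_prio(&async->work);
 *	btrfs_queue_worker(&fs_info->workers, &async->work);
 */
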
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}