Blame view

fs/btrfs/async-thread.c 17.8 KB
8b7128429   Chris Mason   Btrfs: Add async ...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
  /*
   * Copyright (C) 2007 Oracle.  All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public
   * License v2 as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * General Public License for more details.
   *
   * You should have received a copy of the GNU General Public
   * License along with this program; if not, write to the
   * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   * Boston, MA 021110-1307, USA.
   */
  
  #include <linux/kthread.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
20
  #include <linux/slab.h>
8b7128429   Chris Mason   Btrfs: Add async ...
21
22
  #include <linux/list.h>
  #include <linux/spinlock.h>
b51912c91   Chris Mason   Btrfs: async thre...
23
  #include <linux/freezer.h>
8b7128429   Chris Mason   Btrfs: Add async ...
24
  #include "async-thread.h"
4a69a4100   Chris Mason   Btrfs: Add ordere...
25
26
27
/*
 * bit numbers in btrfs_work->flags used to track a work item's
 * progress through the queue
 */
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3
4a69a4100   Chris Mason   Btrfs: Add ordere...
29

8b7128429   Chris Mason   Btrfs: Add async ...
30
31
32
33
34
/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	/* high priority work waiting for service, drained before 'pending' */
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	/* counts jobs handed to this worker; used by next_worker() to
	 * rotate busy workers in batches of idle_thresh */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
0dc3b84a7   Josef Bacik   Btrfs: fix num_wo...
63
  static int __btrfs_start_workers(struct btrfs_workers *workers);
8b7128429   Chris Mason   Btrfs: Add async ...
64
/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};
  
  static void start_new_worker_func(struct btrfs_work *work)
  {
  	struct worker_start *start;
  	start = container_of(work, struct worker_start, work);
0dc3b84a7   Josef Bacik   Btrfs: fix num_wo...
88
  	__btrfs_start_workers(start->queue);
61d92c328   Chris Mason   Btrfs: fix deadlo...
89
90
  	kfree(start);
  }
61d92c328   Chris Mason   Btrfs: fix deadlo...
91
  /*
35d8ba662   Chris Mason   Btrfs: Worker thr...
92
93
94
95
96
97
98
99
100
101
   * helper function to move a thread onto the idle list after it
   * has finished some requests.
   */
  static void check_idle_worker(struct btrfs_worker_thread *worker)
  {
  	if (!worker->idle && atomic_read(&worker->num_pending) <
  	    worker->workers->idle_thresh / 2) {
  		unsigned long flags;
  		spin_lock_irqsave(&worker->workers->lock, flags);
  		worker->idle = 1;
3e99d8eb3   Chris Mason   Btrfs: fix async ...
102
103
104
105
106
107
  
  		/* the list may be empty if the worker is just starting */
  		if (!list_empty(&worker->worker_list)) {
  			list_move(&worker->worker_list,
  				 &worker->workers->idle_list);
  		}
35d8ba662   Chris Mason   Btrfs: Worker thr...
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
  		spin_unlock_irqrestore(&worker->workers->lock, flags);
  	}
  }
  
  /*
   * helper function to move a thread off the idle list after new
   * pending work is added.
   */
  static void check_busy_worker(struct btrfs_worker_thread *worker)
  {
  	if (worker->idle && atomic_read(&worker->num_pending) >=
  	    worker->workers->idle_thresh) {
  		unsigned long flags;
  		spin_lock_irqsave(&worker->workers->lock, flags);
  		worker->idle = 0;
3e99d8eb3   Chris Mason   Btrfs: fix async ...
123
124
125
126
127
  
  		if (!list_empty(&worker->worker_list)) {
  			list_move_tail(&worker->worker_list,
  				      &worker->workers->worker_list);
  		}
35d8ba662   Chris Mason   Btrfs: Worker thr...
128
129
130
  		spin_unlock_irqrestore(&worker->workers->lock, flags);
  	}
  }
9042846bc   Chris Mason   Btrfs: Allow work...
131
132
133
/*
 * if find_worker() could not start a thread itself it sets
 * atomic_start_pending; here a running worker notices that flag and
 * hands the actual thread creation off to the helper queue
 * (atomic_worker_start), since kthread_run may block for a long time.
 */
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	/* cheap unlocked peek; make sure we read a fresh value */
	rmb();
	if (!workers->atomic_start_pending)
		return;

	/* allocate before taking the lock; GFP_NOFS avoids fs recursion */
	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	/* re-check under the lock; another worker may have beaten us */
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	/* the pool may have filled up while we were allocating */
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	/* reserve the slot before dropping the lock; the started thread
	 * (via __btrfs_start_workers) converts it into num_workers */
	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}
4a69a4100   Chris Mason   Btrfs: Add ordere...
163
164
165
/*
 * for ordered pools, run the ordered_func/ordered_free hooks of every
 * finished item at the head of the order lists, strictly in queue
 * order: a finished item sitting behind an unfinished one must wait.
 * 'work' is the item the calling worker just completed.
 * Always returns 0.
 */
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	/* order_lock is never taken from irq context (ordered queueing
	 * from interrupt handlers is disallowed, see btrfs_queue_worker) */
	spin_lock(&workers->order_lock);

	while (1) {
		/* high priority completions drain ahead of regular ones */
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		/* head of queue not finished yet: later completions wait */
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}
9042846bc   Chris Mason   Btrfs: Allow work...
204
205
206
207
208
209
210
211
212
213
214
  static void put_worker(struct btrfs_worker_thread *worker)
  {
  	if (atomic_dec_and_test(&worker->refs))
  		kfree(worker);
  }
  
/*
 * called by an idle worker thread that wants to exit.  Returns 1 when
 * the worker was unlinked from the pool (caller's thread may return),
 * 0 when it must keep running.  Lock order here is worker->lock
 * outside workers->lock, matching btrfs_requeue_work/btrfs_queue_worker.
 */
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	/* only exit if we're truly idle and not the last worker */
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	/* drop the pool's reference taken at creation time */
	if (freeit)
		put_worker(worker);
	return freeit;
}
4f878e847   Chris Mason   Btrfs: reduce wor...
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
/*
 * pick the next work item for a worker thread.  prio_head/head are
 * per-thread private lists that are bulk-refilled (under worker->lock)
 * from the shared pending lists, so most iterations run lock free.
 * High priority items are always drained before regular ones.
 * Returns NULL when there is nothing to do.
 */
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if(!list_empty(prio_head))
		cur = prio_head->next;

	/* make sure we see recent additions to prio_pending before
	 * falling back to the normal list */
	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	/* private lists empty (or new prio work arrived): splice in
	 * everything pending under the lock */
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}
35d8ba662   Chris Mason   Btrfs: Worker thr...
274
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	/* private refill lists for get_next_work() */
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);
	do {
again:
		/* drain everything queued on this worker */
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);
			check_pending_worker_creates(worker);
			cond_resched();
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);
		if (freezing(current)) {
			/* allow a queuer to wake us after the freeze */
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			try_to_freeze();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump_in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work?, sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					/* work arrived after all; undo the
					 * sleep state and go service it */
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					/* nap for up to two minutes; if we
					 * are still unused, try to exit */
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
  
/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	/* fold the idle threads into the main list so one loop stops all */
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		/* pin the struct while we drop the lock below */
		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			/* drop the pool's reference; we still hold ours */
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			/* the worker is exiting on its own
			 * (try_worker_shutdown); don't kthread_stop it */
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}
  
  /*
   * simple init on struct btrfs_workers
   */
61d92c328   Chris Mason   Btrfs: fix deadlo...
412
413
  void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  			struct btrfs_workers *async_helper)
8b7128429   Chris Mason   Btrfs: Add async ...
414
415
  {
  	workers->num_workers = 0;
61d92c328   Chris Mason   Btrfs: fix deadlo...
416
  	workers->num_workers_starting = 0;
8b7128429   Chris Mason   Btrfs: Add async ...
417
  	INIT_LIST_HEAD(&workers->worker_list);
35d8ba662   Chris Mason   Btrfs: Worker thr...
418
  	INIT_LIST_HEAD(&workers->idle_list);
4a69a4100   Chris Mason   Btrfs: Add ordere...
419
  	INIT_LIST_HEAD(&workers->order_list);
d313d7a31   Chris Mason   Btrfs: add a prio...
420
  	INIT_LIST_HEAD(&workers->prio_order_list);
8b7128429   Chris Mason   Btrfs: Add async ...
421
  	spin_lock_init(&workers->lock);
4e3f9c504   Chris Mason   Btrfs: keep irqs ...
422
  	spin_lock_init(&workers->order_lock);
8b7128429   Chris Mason   Btrfs: Add async ...
423
  	workers->max_workers = max;
61b494401   Chris Mason   Btrfs: Fix stream...
424
  	workers->idle_thresh = 32;
5443be45f   Chris Mason   Btrfs: Give all t...
425
  	workers->name = name;
4a69a4100   Chris Mason   Btrfs: Add ordere...
426
  	workers->ordered = 0;
9042846bc   Chris Mason   Btrfs: Allow work...
427
  	workers->atomic_start_pending = 0;
61d92c328   Chris Mason   Btrfs: fix deadlo...
428
  	workers->atomic_worker_start = async_helper;
8b7128429   Chris Mason   Btrfs: Add async ...
429
430
431
432
433
434
  }
  
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 *
 * The caller must already have bumped workers->num_workers_starting;
 * this function converts that reservation into num_workers on success
 * and releases it on failure.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	/* the pool holds this initial reference until the worker exits */
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	worker->task = kthread_run(worker_loop, worker,
				   "btrfs-%s-%d", workers->name,
				   workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		kfree(worker);
		goto fail;
	}
	/* publish the new thread: starting -> started */
	spin_lock_irq(&workers->lock);
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	return 0;
fail:
	/* release the caller's reservation so accounting stays balanced */
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}
0dc3b84a7   Josef Bacik   Btrfs: fix num_wo...
476
/*
 * public entry point for starting one worker thread.  Reserves a slot
 * in num_workers_starting (so max_workers accounting includes threads
 * that are on their way up) before spawning; __btrfs_start_workers()
 * drops the reservation on both success and failure.
 */
int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}
8b7128429   Chris Mason   Btrfs: Add async ...
483
484
485
486
487
488
489
490
491
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 *
 * Caller must hold workers->lock (see find_worker()).
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	/* below the limit: prefer starting a thread over loading a busy one */
	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
d352ac681   Chris Mason   Btrfs: add and im...
525
526
527
528
529
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

	spin_lock_irqsave(&workers->lock, flags);
again:
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			/* pool is full, pile onto an existing worker */
			goto fallback;
		} else if (workers->atomic_worker_start) {
			/*
			 * can't start a thread from this context; flag it
			 * so a running worker hands creation to the helper
			 * queue (see check_pending_worker_creates)
			 */
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
		  struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}
  
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 * Always returns 0.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	/* already on a pending list, nothing to do */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			      &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	/* only wake the thread if it wasn't already kicking */
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}
d313d7a31   Chris Mason   Btrfs: add a prio...
627
628
629
630
/*
 * flag a work item as high priority so the queue/ordered paths place
 * it on the prio lists; set this before calling btrfs_queue_worker()
 * since the bit is tested at queue time
 */
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
8b7128429   Chris Mason   Btrfs: Add async ...
631
632
633
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	/* find_worker() also bumps worker->num_pending for us */
	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}