Blame view
fs/btrfs/async-thread.c
10.3 KB
c1d7c514f btrfs: replace GP... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
8b7128429 Btrfs: Add async ... |
2 3 |
/* * Copyright (C) 2007 Oracle. All rights reserved. |
08a9ff326 btrfs: Added btrf... |
4 |
* Copyright (C) 2014 Fujitsu. All rights reserved. |
8b7128429 Btrfs: Add async ... |
5 6 7 |
*/ #include <linux/kthread.h> |
5a0e3ad6a include cleanup: ... |
8 |
#include <linux/slab.h> |
8b7128429 Btrfs: Add async ... |
9 10 |
#include <linux/list.h> #include <linux/spinlock.h> |
b51912c91 Btrfs: async thre... |
11 |
#include <linux/freezer.h> |
8b7128429 Btrfs: Add async ... |
12 |
#include "async-thread.h" |
52483bc26 btrfs: Add ftrace... |
13 |
#include "ctree.h" |
8b7128429 Btrfs: Add async ... |
14 |
|
enum {
	/* Set after work->func() has run (see btrfs_work_helper()). */
	WORK_DONE_BIT,
	/* Set once this work's ordered_func has been called (run_ordered_work()). */
	WORK_ORDER_DONE_BIT,
	/* Route this work to the high priority queue (btrfs_set_work_high_priority()). */
	WORK_HIGH_PRIO_BIT,
};
4a69a4100 Btrfs: Add ordere... |
20 |
|
/* Thresholding (dynamic concurrency adjustment) is disabled for this wq. */
#define NO_THRESHOLD (-1)
/* Default threshold, used when the caller passes thresh == 0. */
#define DFT_THRESHOLD (32)
d458b0540 btrfs: Cleanup th... |
23 |
struct __btrfs_workqueue {
	/* Kernel workqueue that actually executes the work items */
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/*
	 * Thresholding related variants.
	 *
	 * pending counts works queued via thresh_queue_hook() and not yet
	 * started by thresh_exec_hook().
	 */
	atomic_t pending;

	/* Up limit of concurrency workers */
	int limit_active;

	/* Current number of concurrency workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	/* Counter used in thresh_exec_hook() to skip some adjustments */
	unsigned int count;
	/* Protects count/current_active updates in thresh_exec_hook() */
	spinlock_t thres_lock;
};
d458b0540 btrfs: Cleanup th... |
48 49 50 |
/*
 * Pair of low-level workqueues: every btrfs_workqueue has a normal queue,
 * and optionally a high priority one (allocated when WQ_HIGHPRI is passed
 * to btrfs_alloc_workqueue(); may be NULL otherwise).
 */
struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};
e1f60a658 btrfs: add __pure... |
52 |
/* Return the filesystem that the low-level workqueue @wq services. */
struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	struct btrfs_fs_info *fs_info = wq->fs_info;

	return fs_info;
}
e1f60a658 btrfs: add __pure... |
56 |
/* Return the filesystem that the workqueue of @work services. */
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	const struct __btrfs_workqueue *wq = work->wq;

	return wq->fs_info;
}
9a35b6372 btrfs: constify t... |
60 |
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) |
2939e1a86 btrfs: limit asyn... |
61 62 63 64 65 66 67 68 69 70 71 72 |
{ /* * We could compare wq->normal->pending with num_online_cpus() * to support "thresh == NO_THRESHOLD" case, but it requires * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's * postpone it until someone needs the support of that case. */ if (wq->normal->thresh == NO_THRESHOLD) return false; return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; } |
9e0af2376 Btrfs: fix task h... |
73 |
/*
 * Allocate and set up one low-level workqueue.
 *
 * @fs_info:      filesystem this workqueue services
 * @name:         used to build the workqueue name, "btrfs-<name>" or
 *                "btrfs-<name>-high" when WQ_HIGHPRI is set
 * @flags:        flags passed through to alloc_workqueue()
 * @limit_active: upper bound for the number of concurrently active workers
 * @thresh:       threshold for dynamic concurrency adjustment; 0 selects
 *                DFT_THRESHOLD, anything below DFT_THRESHOLD disables
 *                thresholding entirely (NO_THRESHOLD)
 *
 * Returns the new queue, or NULL on allocation failure.
 */
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
						 ret->current_active, name);
	else
		ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
						 ret->current_active, name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

/* Forward declaration, defined below; needed by btrfs_alloc_workqueue(). */
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
1ca08976a btrfs: Add high p... |
119 |
|
cb001095c btrfs: plumb fs_i... |
120 121 |
/*
 * Allocate a btrfs_workqueue: always a normal low-level queue, plus a high
 * priority one when WQ_HIGHPRI is set in @flags.  The WQ_HIGHPRI bit is
 * stripped from the normal queue's flags so only the high queue gets it.
 *
 * On any failure everything already allocated is torn down and NULL is
 * returned.
 */
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			/* Unwind in reverse order of allocation. */
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
0bd9289c2 btrfs: Add thresh... |
148 149 150 151 152 |
/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * may be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 *
	 * NOTE(review): as written, only the wrap-to-zero case (one in every
	 * thresh/4 executions) skips the recalculation; the real rate
	 * limiting comes from need_change rarely becoming true — confirm
	 * against upstream intent before changing.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may be changed later, but it's OK since we really
	 * don't need it so accurate to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	/* Call outside thres_lock; safe here because we are in kthread context. */
	if (need_change) {
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
	}
}
c495dcd6f btrfs: don't prem... |
207 208 |
/*
 * Run the ordered completion functions for finished works, in submission
 * order.  @self is the work item currently being executed by this worker
 * (the one whose WORK_DONE_BIT was just set); it must not be freed from
 * inside the loop, see the comment below.
 */
static void run_ordered_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		/* The head of the list has not finished yet; stop here. */
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		/* Called without the list lock held. */
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that different types of Btrfs work can depend on
			 * each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem via, e.g., a loop device.
			 * Therefore, we must not allow the current work item to
			 * be recycled until we are really done, otherwise we
			 * break the above assumption and can deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_free(work);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	/* Deferred free of the currently executing item, see above. */
	if (free_self) {
		self->ordered_free(self);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}
a0cac0ec9 btrfs: get rid of... |
282 |
/*
 * Entry point executed by the kernel workqueue for every btrfs_work
 * (installed by btrfs_init_work() via INIT_WORK).  Runs the thresholding
 * hook, the main work function, and then the ordered-work processing if
 * this work has an ordered_func.
 */
static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}
a0cac0ec9 btrfs: get rid of... |
310 311 |
/*
 * Initialize a btrfs_work item.
 *
 * @func:         main work function, executed by btrfs_work_helper()
 * @ordered_func: optional; called in submission order once @func finished
 *                (see run_ordered_work())
 * @ordered_free: optional; releases the item after ordered processing
 */
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
	work->flags = 0;
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_LIST_HEAD(&work->ordered_list);
	INIT_WORK(&work->normal_work, btrfs_work_helper);
}
d458b0540 btrfs: Cleanup th... |
320 321 |
/*
 * Queue @work on the low-level workqueue @wq.
 *
 * Works with an ordered_func are linked onto wq->ordered_list before being
 * handed to the kernel workqueue, so run_ordered_work() can call their
 * completion functions in submission order.
 */
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	/* Account this work for the thresholding machinery (IRQ-safe hook). */
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}
d458b0540 btrfs: Cleanup th... |
335 336 |
void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work) |
1ca08976a btrfs: Add high p... |
337 |
{ |
d458b0540 btrfs: Cleanup th... |
338 |
struct __btrfs_workqueue *dest_wq; |
1ca08976a btrfs: Add high p... |
339 340 341 342 343 344 345 346 347 |
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) dest_wq = wq->high; else dest_wq = wq->normal; __btrfs_queue_work(dest_wq, work); } static inline void |
d458b0540 btrfs: Cleanup th... |
348 |
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq) |
08a9ff326 btrfs: Added btrf... |
349 350 |
{ destroy_workqueue(wq->normal_wq); |
c3a468915 btrfs: Add trace ... |
351 |
trace_btrfs_workqueue_destroy(wq); |
08a9ff326 btrfs: Added btrf... |
352 353 |
kfree(wq); } |
d458b0540 btrfs: Cleanup th... |
354 |
/*
 * Destroy a btrfs_workqueue: the optional high priority sub-queue first,
 * then the normal one, then the wrapper itself.  NULL @wq is a no-op.
 */
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}
c6dd6ea55 btrfs: async_thre... |
363 |
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active) |
08a9ff326 btrfs: Added btrf... |
364 |
{ |
800ee2247 btrfs: fix crash ... |
365 366 |
if (!wq) return; |
c6dd6ea55 btrfs: async_thre... |
367 |
wq->normal->limit_active = limit_active; |
1ca08976a btrfs: Add high p... |
368 |
if (wq->high) |
c6dd6ea55 btrfs: async_thre... |
369 |
wq->high->limit_active = limit_active; |
1ca08976a btrfs: Add high p... |
370 |
} |
d458b0540 btrfs: Cleanup th... |
371 |
/*
 * Mark @work so btrfs_queue_work() routes it to the high priority queue
 * (if the destination btrfs_workqueue has one).
 */
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
f0cc2cd70 Btrfs: fix crash ... |
375 376 377 378 379 380 381 382 |
/*
 * Wait for all currently queued work on @wq (both the high priority and
 * the normal sub-queues) to finish.
 *
 * Tolerates a NULL @wq for consistency with btrfs_destroy_workqueue()
 * and btrfs_workqueue_set_max(), which both guard against NULL; the
 * original dereferenced @wq unconditionally.
 */
void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;

	if (wq->high)
		flush_workqueue(wq->high->normal_wq);
	flush_workqueue(wq->normal->normal_wq);
}