mm/backing-dev.c

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	if (wb1 < wb2) {
		spin_lock(&wb1->list_lock);
		spin_lock_nested(&wb2->list_lock, 1);
	} else {
		spin_lock(&wb2->list_lock);
		spin_lock_nested(&wb1->list_lock, 1);
	}
}
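/*
 * Example (illustrative sketch, not from the original file): because
 * bdi_lock_two() orders the two acquisitions by the address of the
 * bdi_writeback structs, two tasks locking the same pair with swapped
 * arguments still take the locks in one global order and cannot
 * ABBA-deadlock:
 *
 *	bdi_lock_two(&bdi_a->wb, &bdi_b->wb);	// task 1
 *	bdi_lock_two(&bdi_b->wb, &bdi_a->wb);	// task 2, same lock order
 *
 * The spin_lock_nested(..., 1) annotation tells lockdep that taking a
 * second lock of the same class here is intentional. "bdi_a" and "bdi_b"
 * are placeholder names.
 */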
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
		   (unsigned long) K(bdi->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
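/*
 * For reference, an illustrative hand-expansion of
 * BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) - this is what the
 * preprocessor generates, modulo whitespace:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 */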
static ssize_t min_ratio_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");
	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

static void wakeup_timer_fn(unsigned long data)
{
	struct backing_dev_info *bdi = (struct backing_dev_info *)data;

	spin_lock_bh(&bdi->wb_lock);
	if (bdi->wb.task) {
		trace_writeback_wake_thread(bdi);
		wake_up_process(bdi->wb.task);
	} else if (bdi->dev) {
		/*
		 * When bdi tasks are inactive for a long time, they are
		 * killed. In this case we have to wake up the forker thread,
		 * which should create and run the bdi thread.
		 */
		trace_writeback_wake_forker_thread(bdi);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}
/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
	unsigned long interval;

	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
	return max(5UL * 60 * HZ, interval);
}
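/*
 * Worked example: with the default dirty_writeback_interval of 500
 * centisecs, the interval above is msecs_to_jiffies(500 * 10), i.e. five
 * seconds' worth of jiffies, so max(5UL * 60 * HZ, interval) returns the
 * five-minute floor. Only an interval tuned above five minutes raises the
 * allowed inactivity.
 */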
/*
 * Clear pending bit and wake up anybody waiting for flusher thread creation
 * or shutdown.
 */
static void bdi_clear_pending(struct backing_dev_info *bdi)
{
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);
}

static int bdi_forker_thread(void *ptr)
{
	struct bdi_writeback *me = ptr;

	current->flags |= PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	for (;;) {
		struct task_struct *task = NULL;
		struct backing_dev_info *bdi;
		enum {
			NO_ACTION,   /* Nothing to do */
			FORK_THREAD, /* Fork bdi thread */
			KILL_THREAD, /* Kill inactive bdi thread */
		} action = NO_ACTION;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
			del_timer(&me->wakeup_timer);
			wb_do_writeback(me, 0);
		}

		spin_lock_bh(&bdi_lock);
		/*
		 * In the following loop we are going to check whether we have
		 * some work to do without any synchronization with tasks
		 * waking us up to do work for them. Set the task state here
		 * so that we don't miss wakeups after verifying conditions.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry(bdi, &bdi_list, bdi_list) {
			bool have_dirty_io;

			if (!bdi_cap_writeback_dirty(bdi) ||
			     bdi_cap_flush_forker(bdi))
				continue;

			WARN(!test_bit(BDI_registered, &bdi->state),
			     "bdi %p/%s is not registered!\n", bdi, bdi->name);

			have_dirty_io = !list_empty(&bdi->work_list) ||
					wb_has_dirty_io(&bdi->wb);

			/*
			 * If the bdi has work to do, but the thread does not
			 * exist - create it.
			 */
			if (!bdi->wb.task && have_dirty_io) {
				/*
				 * Set the pending bit - if someone tries to
				 * unregister this bdi - it'll wait on this bit.
				 */
				set_bit(BDI_pending, &bdi->state);
				action = FORK_THREAD;
				break;
			}

			spin_lock(&bdi->wb_lock);

			/*
			 * If there is no work to do and the bdi thread was
			 * inactive long enough - kill it. The wb_lock is taken
			 * to make sure no-one adds more work to this bdi and
			 * wakes the bdi thread up.
			 */
			if (bdi->wb.task && !have_dirty_io &&
			    time_after(jiffies, bdi->wb.last_active +
						bdi_longest_inactive())) {
				task = bdi->wb.task;
				bdi->wb.task = NULL;
				spin_unlock(&bdi->wb_lock);
				set_bit(BDI_pending, &bdi->state);
				action = KILL_THREAD;
				break;
			}
			spin_unlock(&bdi->wb_lock);
		}
		spin_unlock_bh(&bdi_lock);

		/* Keep working if default bdi still has things to do */
		if (!list_empty(&me->bdi->work_list))
			__set_current_state(TASK_RUNNING);

		switch (action) {
		case FORK_THREAD:
			__set_current_state(TASK_RUNNING);
			task = kthread_create(bdi_writeback_thread, &bdi->wb,
					      "flush-%s", dev_name(bdi->dev));
			if (IS_ERR(task)) {
				/*
				 * If thread creation fails, force writeout of
				 * the bdi from the thread. Hopefully 1024 is
				 * large enough for efficient IO.
				 */
				writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
			} else {
				/*
				 * The spinlock makes sure we do not lose
				 * wake-ups when racing with 'bdi_queue_work()'.
				 * And as soon as the bdi thread is visible, we
				 * can start it.
				 */
				spin_lock_bh(&bdi->wb_lock);
				bdi->wb.task = task;
				spin_unlock_bh(&bdi->wb_lock);
				wake_up_process(task);
			}
			bdi_clear_pending(bdi);
			break;

		case KILL_THREAD:
			__set_current_state(TASK_RUNNING);
			kthread_stop(task);
			bdi_clear_pending(bdi);
			break;

		case NO_ACTION:
			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
				/*
				 * There is no dirty data. The only thing we
				 * should now care about is checking for
				 * inactive bdi threads and killing them. Thus,
				 * let's sleep for a longer time, save energy
				 * and be friendly to battery-driven devices.
				 */
				schedule_timeout(bdi_longest_inactive());
			else
				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
			try_to_freeze();
			break;
		}
	}

	return 0;
}
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
				       dev_name(dev));
		if (IS_ERR(wb->task))
			return PTR_ERR(wb->task);
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
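/*
 * Usage sketch (illustrative, not from this file): the block core
 * registers a queue's bdi against the disk's device number once it is
 * known, roughly:
 *
 *	err = bdi_register_dev(&q->backing_dev_info, disk_devt(disk));
 *	if (err)
 *		goto out;	/- hypothetical error label -/
 *
 * which names the bdi "<major>:<minor>" under /sys/class/bdi/.
 */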
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	struct task_struct *task;

	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Finally, kill the kernel thread. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility.
	 */
	spin_lock_bh(&bdi->wb_lock);
	task = bdi->wb.task;
	bdi->wb.task = NULL;
	spin_unlock_bh(&bdi->wb_lock);

	if (task)
		kthread_stop(task);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = &default_backing_dev_info;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	struct device *dev = bdi->dev;

	if (dev) {
		bdi_set_min_ratio(bdi, 0);
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);
		del_timer_sync(&bdi->wb.wakeup_timer);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);

		spin_lock_bh(&bdi->wb_lock);
		bdi->dev = NULL;
		spin_unlock_bh(&bdi->wb_lock);

		device_unregister(dev);
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	spin_lock_init(&wb->list_lock);
	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;

	bdi->bw_time_stamp = jiffies;
	bdi->written_stamp = 0;

	bdi->balanced_dirty_ratelimit = INIT_BW;
	bdi->dirty_ratelimit = INIT_BW;
	bdi->write_bandwidth = INIT_BW;
	bdi->avg_write_bandwidth = INIT_BW;

	err = fprop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		bdi_lock_two(&bdi->wb, dst);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&bdi->wb.list_lock);
		spin_unlock(&dst->list_lock);
	}

	bdi_unregister(bdi);

	/*
	 * If bdi_unregister() had already been called earlier, the
	 * wakeup_timer could still be armed because bdi_prune_sb()
	 * can race with the bdi_wakeup_thread_delayed() calls from
	 * __mark_inode_dirty().
	 */
	del_timer_sync(&bdi->wb.wakeup_timer);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
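/*
 * Usage sketch (illustrative): a filesystem with no backing block device
 * can embed a bdi in its per-superblock info and bring it up at mount
 * time:
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * "sbi" and "myfs" are placeholder names. The registered name becomes
 * "myfs-<N>", with N drawn from the global bdi_seq counter above;
 * bdi_destroy() undoes the whole thing at unmount.
 */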
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (test_and_clear_bit(bit, &bdi->state))
		atomic_dec(&nr_bdi_congested[sync]);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (!test_and_set_bit(bit, &bdi->state))
		atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
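/*
 * Usage sketch (illustrative): page reclaim throttles itself with calls
 * like
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. sleep up to 100ms on the async queue, waking early when some bdi
 * clears congestion or a write completes on the waitqueue.
 */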
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) where the given
 * @zone has experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
			!zone_is_reclaim_congested(zone)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
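/*
 * Usage sketch (illustrative): direct reclaim prefers
 *
 *	wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 *
 * over plain congestion_wait() so that it only sleeps when the zone being
 * scanned is actually backed by congested devices; on an uncongested zone
 * the call costs at most a cond_resched().
 */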
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
			table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}
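/*
 * Usage sketch (illustrative): kernel/sysctl.c points the obsolete
 * nr_pdflush_threads entry at this handler, roughly:
 *
 *	{
 *		.procname	= "nr_pdflush_threads",
 *		.mode		= 0444,
 *		.proc_handler	= pdflush_proc_obsolete,
 *	},
 *
 * so reading /proc/sys/vm/nr_pdflush_threads returns "0" plus a one-time
 * deprecation warning, while the read-only 0444 mode rejects writes.
 */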