Commit 16c4042f08919f447d6b2a55679546c9b97c7264

Authored by Wu Fengguang
Committed by Linus Torvalds
1 parent e50e37201a

writeback: avoid unnecessary calculation of bdi dirty thresholds

Split get_dirty_limits() into global_dirty_limits()+bdi_dirty_limit(), so
that the latter can be avoided when under global dirty background
threshold (which is the normal state for most systems).

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 4 changed files with 44 additions and 41 deletions (side-by-side diff)

fs/fs-writeback.c
... ... @@ -590,7 +590,7 @@
590 590 {
591 591 unsigned long background_thresh, dirty_thresh;
592 592  
593   - get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
  593 + global_dirty_limits(&background_thresh, &dirty_thresh);
594 594  
595 595 return (global_page_state(NR_FILE_DIRTY) +
596 596 global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
include/linux/writeback.h
... ... @@ -124,8 +124,9 @@
124 124 int dirty_writeback_centisecs_handler(struct ctl_table *, int,
125 125 void __user *, size_t *, loff_t *);
126 126  
127   -void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
128   - unsigned long *pbdi_dirty, struct backing_dev_info *bdi);
  127 +void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
  128 +unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
  129 + unsigned long dirty);
129 130  
130 131 void page_writeback_init(void);
131 132 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
mm/backing-dev.c
... ... @@ -81,7 +81,8 @@
81 81 nr_more_io++;
82 82 spin_unlock(&inode_lock);
83 83  
84   - get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
  84 + global_dirty_limits(&background_thresh, &dirty_thresh);
  85 + bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
85 86  
86 87 #define K(x) ((x) << (PAGE_SHIFT - 10))
87 88 seq_printf(m,
mm/page_writeback.c
... ... @@ -267,10 +267,11 @@
267 267 *
268 268 * dirty -= (dirty/8) * p_{t}
269 269 */
270   -static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
  270 +static unsigned long task_dirty_limit(struct task_struct *tsk,
  271 + unsigned long bdi_dirty)
271 272 {
272 273 long numerator, denominator;
273   - unsigned long dirty = *pdirty;
  274 + unsigned long dirty = bdi_dirty;
274 275 u64 inv = dirty >> 3;
275 276  
276 277 task_dirties_fraction(tsk, &numerator, &denominator);
277 278  
... ... @@ -278,10 +279,8 @@
278 279 do_div(inv, denominator);
279 280  
280 281 dirty -= inv;
281   - if (dirty < *pdirty/2)
282   - dirty = *pdirty/2;
283 282  
284   - *pdirty = dirty;
  283 + return max(dirty, bdi_dirty/2);
285 284 }
286 285  
287 286 /*
... ... @@ -391,9 +390,7 @@
391 390 return x + 1; /* Ensure that we never return 0 */
392 391 }
393 392  
394   -void
395   -get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
396   - unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
  393 +void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
397 394 {
398 395 unsigned long background;
399 396 unsigned long dirty;
400 397  
401 398  
402 399  
403 400  
... ... @@ -425,26 +422,28 @@
425 422 }
426 423 *pbackground = background;
427 424 *pdirty = dirty;
  425 +}
428 426  
429   - if (bdi) {
430   - u64 bdi_dirty;
431   - long numerator, denominator;
  427 +unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
  428 + unsigned long dirty)
  429 +{
  430 + u64 bdi_dirty;
  431 + long numerator, denominator;
432 432  
433   - /*
434   - * Calculate this BDI's share of the dirty ratio.
435   - */
436   - bdi_writeout_fraction(bdi, &numerator, &denominator);
  433 + /*
  434 + * Calculate this BDI's share of the dirty ratio.
  435 + */
  436 + bdi_writeout_fraction(bdi, &numerator, &denominator);
437 437  
438   - bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
439   - bdi_dirty *= numerator;
440   - do_div(bdi_dirty, denominator);
441   - bdi_dirty += (dirty * bdi->min_ratio) / 100;
442   - if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
443   - bdi_dirty = dirty * bdi->max_ratio / 100;
  438 + bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
  439 + bdi_dirty *= numerator;
  440 + do_div(bdi_dirty, denominator);
444 441  
445   - *pbdi_dirty = bdi_dirty;
446   - task_dirty_limit(current, pbdi_dirty);
447   - }
  442 + bdi_dirty += (dirty * bdi->min_ratio) / 100;
  443 + if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
  444 + bdi_dirty = dirty * bdi->max_ratio / 100;
  445 +
  446 + return bdi_dirty;
448 447 }
449 448  
450 449 /*
451 450  
452 451  
... ... @@ -475,14 +474,25 @@
475 474 .range_cyclic = 1,
476 475 };
477 476  
478   - get_dirty_limits(&background_thresh, &dirty_thresh,
479   - &bdi_thresh, bdi);
480   -
481 477 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
482 478 global_page_state(NR_UNSTABLE_NFS);
483 479 nr_writeback = global_page_state(NR_WRITEBACK);
484 480  
  481 + global_dirty_limits(&background_thresh, &dirty_thresh);
  482 +
485 483 /*
  484 + * Throttle it only when the background writeback cannot
  485 + * catch-up. This avoids (excessively) small writeouts
  486 + * when the bdi limits are ramping up.
  487 + */
  488 + if (nr_reclaimable + nr_writeback <
  489 + (background_thresh + dirty_thresh) / 2)
  490 + break;
  491 +
  492 + bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
  493 + bdi_thresh = task_dirty_limit(current, bdi_thresh);
  494 +
  495 + /*
486 496 * In order to avoid the stacked BDI deadlock we need
487 497 * to ensure we accurately count the 'dirty' pages when
488 498 * the threshold is low.
... ... @@ -513,15 +523,6 @@
513 523 if (!dirty_exceeded)
514 524 break;
515 525  
516   - /*
517   - * Throttle it only when the background writeback cannot
518   - * catch-up. This avoids (excessively) small writeouts
519   - * when the bdi limits are ramping up.
520   - */
521   - if (nr_reclaimable + nr_writeback <
522   - (background_thresh + dirty_thresh) / 2)
523   - break;
524   -
525 526 if (!bdi->dirty_exceeded)
526 527 bdi->dirty_exceeded = 1;
527 528  
... ... @@ -634,7 +635,7 @@
634 635 unsigned long dirty_thresh;
635 636  
636 637 for ( ; ; ) {
637   - get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
  638 + global_dirty_limits(&background_thresh, &dirty_thresh);
638 639  
639 640 /*
640 641 * Boost the allowable dirty threshold a bit for page