Commit 0be956928f4f570ee15922315c0c5bb5256e1734

Authored by Dan Murphy

Merge branch 'master' of http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into ti-linux-3.15.y

* 'master' of http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux:
  dentry_kill() doesn't need the second argument now
  dealing with the rest of shrink_dentry_list() livelock
  shrink_dentry_list(): take parent's ->d_lock earlier
  expand dentry_kill(dentry, 0) in shrink_dentry_list()
  split dentry_kill()
  lift the "already marked killed" case into shrink_dentry_list()

Signed-off-by: Dan Murphy <DMurphy@ti.com>
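
The series pulled in above splits the old dentry_kill(dentry, unlock_on_failure) into a locking front end (dentry_kill()) and the actual teardown (__dentry_kill()), adds a lock_parent() helper, and reworks shrink_dentry_list() so that trylock failures can no longer livelock the shrinker. As a rough illustration of the trylock-and-retry shape the new dentry_kill() gives dput(), here is a minimal userspace sketch; struct node, kill_node(), teardown_locked() and put_node() are made-up names and pthread mutexes stand in for the d_lock/i_lock spinlocks, so this is an analogy of the pattern, not kernel code.

#include <pthread.h>
#include <sched.h>
#include <stddef.h>

struct node {
        pthread_mutex_t lock;           /* plays the role of dentry->d_lock */
        pthread_mutex_t *extra;         /* plays the role of inode->i_lock, may be NULL */
        struct node *parent;            /* NULL for a root node */
        int refcount;
};

/* Tear the node down and drop all the locks kill_node() managed to take. */
static void teardown_locked(struct node *node, struct node *parent)
{
        node->refcount = 0;             /* nobody may find the node any more */
        if (parent)
                pthread_mutex_unlock(&parent->lock);
        if (node->extra)
                pthread_mutex_unlock(node->extra);
        pthread_mutex_unlock(&node->lock);
        /* freeing the node would happen here */
}

/*
 * Called with node->lock held, returns with it released.
 * Returns the parent that now needs a reference drop, NULL when there is
 * no parent, or the node itself if a trylock failed and the caller must
 * back off and retry -- the same contract as the new dentry_kill().
 */
static struct node *kill_node(struct node *node)
{
        struct node *parent = NULL;

        if (node->extra && pthread_mutex_trylock(node->extra) != 0)
                goto failed;

        if (node->parent) {
                parent = node->parent;
                if (pthread_mutex_trylock(&parent->lock) != 0) {
                        if (node->extra)
                                pthread_mutex_unlock(node->extra);
                        goto failed;
                }
        }

        teardown_locked(node, parent);
        return parent;

failed:
        pthread_mutex_unlock(&node->lock);
        sched_yield();                  /* cpu_relax() in the kernel version */
        return node;                    /* try again with the same node */
}

/* dput()-style caller: keep going with whatever kill_node() hands back. */
static void put_node(struct node *node)
{
        while (node) {
                pthread_mutex_lock(&node->lock);
                if (node->refcount > 1) {
                        node->refcount--;
                        pthread_mutex_unlock(&node->lock);
                        return;
                }
                /* last reference: kill it; the dead node's reference on its
                 * parent is what the returned pointer asks us to drop next */
                node = kill_node(node);
        }
}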

Showing 1 changed file (unified diff below)

@@ -441,42 +441,12 @@
 }
 EXPORT_SYMBOL(d_drop);
 
-/*
- * Finish off a dentry we've decided to kill.
- * dentry->d_lock must be held, returns with it unlocked.
- * If ref is non-zero, then decrement the refcount too.
- * Returns dentry requiring refcount drop, or NULL if we're done.
- */
-static struct dentry *
-dentry_kill(struct dentry *dentry, int unlock_on_failure)
-        __releases(dentry->d_lock)
+static void __dentry_kill(struct dentry *dentry)
 {
-        struct inode *inode;
         struct dentry *parent = NULL;
         bool can_free = true;
-
-        if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
-                can_free = dentry->d_flags & DCACHE_MAY_FREE;
-                spin_unlock(&dentry->d_lock);
-                goto out;
-        }
-
-        inode = dentry->d_inode;
-        if (inode && !spin_trylock(&inode->i_lock)) {
-relock:
-                if (unlock_on_failure) {
-                        spin_unlock(&dentry->d_lock);
-                        cpu_relax();
-                }
-                return dentry; /* try again with same dentry */
-        }
         if (!IS_ROOT(dentry))
                 parent = dentry->d_parent;
-        if (parent && !spin_trylock(&parent->d_lock)) {
-                if (inode)
-                        spin_unlock(&inode->i_lock);
-                goto relock;
-        }
 
         /*
          * The dentry is now unrecoverably dead to the world.
@@ -520,12 +490,75 @@
                 can_free = false;
         }
         spin_unlock(&dentry->d_lock);
-out:
         if (likely(can_free))
                 dentry_free(dentry);
+}
+
+/*
+ * Finish off a dentry we've decided to kill.
+ * dentry->d_lock must be held, returns with it unlocked.
+ * If ref is non-zero, then decrement the refcount too.
+ * Returns dentry requiring refcount drop, or NULL if we're done.
+ */
+static struct dentry *dentry_kill(struct dentry *dentry)
+        __releases(dentry->d_lock)
+{
+        struct inode *inode = dentry->d_inode;
+        struct dentry *parent = NULL;
+
+        if (inode && unlikely(!spin_trylock(&inode->i_lock)))
+                goto failed;
+
+        if (!IS_ROOT(dentry)) {
+                parent = dentry->d_parent;
+                if (unlikely(!spin_trylock(&parent->d_lock))) {
+                        if (inode)
+                                spin_unlock(&inode->i_lock);
+                        goto failed;
+                }
+        }
+
+        __dentry_kill(dentry);
         return parent;
+
+failed:
+        spin_unlock(&dentry->d_lock);
+        cpu_relax();
+        return dentry; /* try again with same dentry */
 }
 
+static inline struct dentry *lock_parent(struct dentry *dentry)
+{
+        struct dentry *parent = dentry->d_parent;
+        if (IS_ROOT(dentry))
+                return NULL;
+        if (likely(spin_trylock(&parent->d_lock)))
+                return parent;
+        spin_unlock(&dentry->d_lock);
+        rcu_read_lock();
+again:
+        parent = ACCESS_ONCE(dentry->d_parent);
+        spin_lock(&parent->d_lock);
+        /*
+         * We can't blindly lock dentry until we are sure
+         * that we won't violate the locking order.
+         * Any changes of dentry->d_parent must have
+         * been done with parent->d_lock held, so
+         * spin_lock() above is enough of a barrier
+         * for checking if it's still our child.
+         */
+        if (unlikely(parent != dentry->d_parent)) {
+                spin_unlock(&parent->d_lock);
+                goto again;
+        }
+        rcu_read_unlock();
+        if (parent != dentry)
+                spin_lock(&dentry->d_lock);
+        else
+                parent = NULL;
+        return parent;
+}
+
 /*
  * This is dput
  *
@@ -579,7 +612,7 @@
         return;
 
 kill_it:
-        dentry = dentry_kill(dentry, 1);
+        dentry = dentry_kill(dentry);
         if (dentry)
                 goto repeat;
 }
@@ -797,8 +830,11 @@
         struct dentry *dentry, *parent;
 
         while (!list_empty(list)) {
+                struct inode *inode;
                 dentry = list_entry(list->prev, struct dentry, d_lru);
                 spin_lock(&dentry->d_lock);
+                parent = lock_parent(dentry);
+
                 /*
                  * The dispose list is isolated and dentries are not accounted
                  * to the LRU here, so we can simply remove it from the list
@@ -812,26 +848,33 @@
                  */
                 if ((int)dentry->d_lockref.count > 0) {
                         spin_unlock(&dentry->d_lock);
+                        if (parent)
+                                spin_unlock(&parent->d_lock);
                         continue;
                 }
 
-                parent = dentry_kill(dentry, 0);
-                /*
-                 * If dentry_kill returns NULL, we have nothing more to do.
-                 */
-                if (!parent)
+
+                if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+                        bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
+                        spin_unlock(&dentry->d_lock);
+                        if (parent)
+                                spin_unlock(&parent->d_lock);
+                        if (can_free)
+                                dentry_free(dentry);
                         continue;
+                }
 
-                if (unlikely(parent == dentry)) {
-                        /*
-                         * trylocks have failed and d_lock has been held the
-                         * whole time, so it could not have been added to any
-                         * other lists. Just add it back to the shrink list.
-                         */
+                inode = dentry->d_inode;
+                if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
                         d_shrink_add(dentry, list);
                         spin_unlock(&dentry->d_lock);
+                        if (parent)
+                                spin_unlock(&parent->d_lock);
                         continue;
                 }
+
+                __dentry_kill(dentry);
+
                 /*
                  * We need to prune ancestors too. This is necessary to prevent
                  * quadratic behavior of shrink_dcache_parent(), but is also
@@ -839,8 +882,26 @@
                  * fragmentation.
                  */
                 dentry = parent;
-                while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
-                        dentry = dentry_kill(dentry, 1);
+                while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
+                        parent = lock_parent(dentry);
+                        if (dentry->d_lockref.count != 1) {
+                                dentry->d_lockref.count--;
+                                spin_unlock(&dentry->d_lock);
+                                if (parent)
+                                        spin_unlock(&parent->d_lock);
+                                break;
+                        }
+                        inode = dentry->d_inode; /* can't be NULL */
+                        if (unlikely(!spin_trylock(&inode->i_lock))) {
+                                spin_unlock(&dentry->d_lock);
+                                if (parent)
+                                        spin_unlock(&parent->d_lock);
+                                cpu_relax();
+                                continue;
+                        }
+                        __dentry_kill(dentry);
+                        dentry = parent;
+                }
         }
 }
 
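
For the lock_parent() helper added above, the subtle part is lock ordering: it is entered with the child's d_lock held, while a parent's d_lock must always be taken before its child's, hence the trylock first and, on failure, the drop-and-reacquire loop. A rough userspace rendering of that retry logic follows; the lock_parent() below is a hypothetical analogue, not the kernel function, struct node and the pthread mutexes are again stand-ins, and since plain userspace code has no rcu_read_lock() to keep a concurrently freed parent's memory valid while waiting on its lock, this sketch simply assumes nodes are never freed while the loop runs.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct node {
        pthread_mutex_t lock;                   /* stands in for d_lock */
        struct node *_Atomic parent;            /* NULL when this node is a root */
};

/*
 * Called with child->lock held.  Returns with the parent's lock also held
 * (child->lock still held as well), or NULL for a root node.  If the
 * opportunistic trylock fails we drop the child's lock, take the parent's
 * lock first, and then check that it is still the parent: re-parenting is
 * only ever done while holding the parent's lock, so an unchanged pointer
 * means we locked the right node.
 */
static struct node *lock_parent(struct node *child)
{
        struct node *parent = atomic_load(&child->parent);

        if (!parent)
                return NULL;
        if (pthread_mutex_trylock(&parent->lock) == 0)
                return parent;                  /* trylock avoids blocking in the wrong order */

        pthread_mutex_unlock(&child->lock);     /* slow path: start over in the right order */
        for (;;) {
                parent = atomic_load(&child->parent);
                if (!parent) {                  /* became a root in the meantime */
                        pthread_mutex_lock(&child->lock);
                        return NULL;
                }
                pthread_mutex_lock(&parent->lock);
                if (parent == atomic_load(&child->parent))
                        break;                  /* still our parent, keep it locked */
                pthread_mutex_unlock(&parent->lock);
        }
        pthread_mutex_lock(&child->lock);
        return parent;
}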