Commit f21760b15dcd091e5afd38d0b97197b45f7ef2ea

Authored by Shaohua Li
Committed by Linus Torvalds
1 parent e5591307f0

thp: add tlb_remove_pmd_tlb_entry

We have tlb_remove_tlb_entry() to indicate that a pte TLB entry should be
flushed, but there is no corresponding API for a pmd entry.  This isn't a
problem so far because THP is x86-only at the moment and tlb_flush() on x86
flushes the entire TLB.  But it is confusing, and the flush could easily be
missed if THP is ported to another architecture.
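
An architecture whose tlb_flush() only invalidates the ranges recorded in
the mmu_gather, rather than the whole TLB as x86 does, could hook the new
macro to remember the huge-page range.  A minimal sketch, assuming
hypothetical start/end range fields on struct mmu_gather (they are not part
of the generic structure):

	/*
	 * Hypothetical per-arch override: widen the gathered range so the
	 * final tlb_flush() covers the huge page.  The start/end fields are
	 * illustrative only.
	 */
	#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
		do {							\
			(tlb)->start = min((tlb)->start,		\
					   (unsigned long)(address));	\
			(tlb)->end   = max((tlb)->end,			\
					   (unsigned long)(address) +	\
					   HPAGE_PMD_SIZE);		\
		} while (0)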

Also convert the tlb->need_flush = 1 assignment in __tlb_remove_page() to a
VM_BUG_ON(!tlb->need_flush), as suggested by Andrea Arcangeli.
__tlb_remove_page() is supposed to be called after
tlb_remove_xxx_tlb_entry(), so this catches any misuse.
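
Concretely, the ordering the assertion relies on is visible in zap_huge_pmd()
below (abridged; the tlb_remove_page() call sits just past the hunk shown in
the diff): the pmd entry is marked first, so need_flush is already set by the
time the freed page reaches __tlb_remove_page() via tlb_remove_page().

	page = pmd_page(*pmd);
	pmd_clear(pmd);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);	/* sets tlb->need_flush */
	page_remove_rmap(page);
	...
	tlb_remove_page(tlb, page);	/* reaches __tlb_remove_page(), which
					   can now assert need_flush */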

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 4 changed files with 19 additions and 4 deletions

include/asm-generic/tlb.h
@@ -139,6 +139,20 @@
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
+/**
+ * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
+ * This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pmd_tlb_entry
+#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
+	do {							\
+		tlb->need_flush = 1;				\
+		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
+	} while (0)
+
 #define pte_free_tlb(tlb, ptep, address)		\
 	do {						\
 		tlb->need_flush = 1;			\
include/linux/huge_mm.h
@@ -18,7 +18,7 @@
 			       unsigned int flags);
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
-			pmd_t *pmd);
+			pmd_t *pmd, unsigned long addr);
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			    unsigned long addr, unsigned long end,
 			    unsigned char *vec);
mm/huge_memory.c
@@ -1026,7 +1026,7 @@
 }
 
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
-		 pmd_t *pmd)
+		 pmd_t *pmd, unsigned long addr)
 {
 	int ret = 0;
 
@@ -1042,6 +1042,7 @@
 		pgtable = get_pmd_huge_pte(tlb->mm);
 		page = pmd_page(*pmd);
 		pmd_clear(pmd);
+		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 		page_remove_rmap(page);
 		VM_BUG_ON(page_mapcount(page) < 0);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
mm/memory.c
@@ -293,7 +293,7 @@
 {
 	struct mmu_gather_batch *batch;
 
-	tlb->need_flush = 1;
+	VM_BUG_ON(!tlb->need_flush);
 
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
@@ -1231,7 +1231,7 @@
 			if (next-addr != HPAGE_PMD_SIZE) {
 				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
 				split_huge_page_pmd(vma->vm_mm, pmd);
-			} else if (zap_huge_pmd(tlb, vma, pmd))
+			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				continue;
 			/* fall through */
 		}