mm/mmu_gather.c
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER |
static bool tlb_next_batch(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;
                return true;
        }

        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                return false;

        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
        if (!batch)
                return false;

        tlb->batch_count++;
        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;

        tlb->active->next = batch;
        tlb->active = batch;

        return true;
}
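/*
 * For reference: each batch beyond the inline one is a single page, and the
 * number of batches is capped. The limits come from include/asm-generic/tlb.h;
 * paraphrased here, so treat the exact expressions as illustrative:
 *
 *      #define MAX_GATHER_BATCH \
 *              ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(struct page *))
 *      #define MAX_GATHER_BATCH_COUNT  (10000UL / MAX_GATHER_BATCH)
 *
 * Capping the batch count bounds a single gather at roughly 10K pages, which
 * limits soft-lockup risk when huge ranges are zapped on non-preemptible
 * kernels.
 */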
static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
        tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch, *next;

        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
        }
        tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
        struct mmu_gather_batch *batch;

        VM_BUG_ON(!tlb->end);

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        VM_WARN_ON(tlb->page_size != page_size);
#endif

        batch = tlb->active;
        /*
         * Add the page and check if we are full. If so
         * force a flush.
         */
        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        }
        VM_BUG_ON_PAGE(batch->nr > batch->max, page);

        return false;
}

#endif /* HAVE_MMU_GATHER_NO_GATHER */
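/*
 * For context: callers normally reach __tlb_remove_page_size() through inline
 * wrappers in include/asm-generic/tlb.h, which turn the "batch full" return
 * value into a flush. A sketch of those wrappers (paraphrased, not verbatim):
 *
 *      static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 *                                              struct page *page, int page_size)
 *      {
 *              if (__tlb_remove_page_size(tlb, page, page_size))
 *                      tlb_flush_mmu(tlb);
 *      }
 *
 *      static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 *      {
 *              tlb_remove_page_size(tlb, page, PAGE_SIZE);
 *      }
 */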
#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
        if (tlb_needs_table_invalidate()) {
                /*
                 * Invalidate page-table caches used by hardware walkers. Then
                 * we still need to RCU-sched wait while freeing the pages
                 * because software walkers can still be in-flight.
                 */
                tlb_flush_mmu_tlbonly(tlb);
        }
}

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}
static void tlb_table_flush(struct mmu_gather *tlb) |
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                tlb_table_invalidate(tlb);
                call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        tlb_table_invalidate(tlb);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }

        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
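/*
 * For context: an architecture selecting CONFIG_HAVE_RCU_TABLE_FREE routes
 * its freed page-table pages through tlb_remove_table() and provides
 * __tlb_remove_table() to do the actual free once it is safe. A hypothetical
 * arch hook might look like this (names and destructor details differ per
 * architecture; this is a sketch, not any arch's real implementation):
 *
 *      static inline void __pte_free_tlb(struct mmu_gather *tlb,
 *                                        pgtable_t pte, unsigned long addr)
 *      {
 *              tlb_remove_table(tlb, pte);
 *      }
 */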
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
#endif
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}
/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
{
        tlb->mm = mm;

        /* Is it from 0 to ~0? */
        tlb->fullmm = !(start | (end+1));

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr = 0;
        tlb->local.max = ARRAY_SIZE(tlb->__pages);
        tlb->active = &tlb->local;
        tlb->batch_count = 0;
#endif

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
#endif
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        tlb->page_size = 0;
#endif

        __tlb_reset_range(tlb);
        inc_tlb_flush_pending(tlb->mm);
}
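/*
 * For context: a typical caller pairs tlb_gather_mmu() with tlb_finish_mmu()
 * around the unmap work. A sketch loosely modelled on zap_page_range() in
 * mm/memory.c (simplified; mmu-notifier calls omitted):
 *
 *      struct mmu_gather tlb;
 *
 *      tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
 *      unmap_single_vma(&tlb, vma, start, end, NULL);
 *      tlb_finish_mmu(&tlb, start, end);
 */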
/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end)
{
        /*
         * If parallel threads are doing PTE changes on the same range under
         * a non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
         * flush by batching, one thread may end up seeing inconsistent PTEs
         * and be left with stale TLB entries. So flush the TLB forcefully
         * if we detect parallel PTE batching threads.
         *
         * However, some syscalls, e.g. munmap(), may free page tables; these
         * need to force-flush everything in the given range. Otherwise stale
         * TLB entries may remain on architectures, e.g. aarch64, that can
         * specify which level of the TLB to flush.
         */
        if (mm_tlb_flush_nested(tlb->mm)) {
                /*
                 * aarch64 yields better performance with fullmm by avoiding
                 * multiple CPUs spamming TLBI messages at the same time.
                 *
                 * On x86, non-fullmm doesn't yield a significant difference
                 * against fullmm.
                 */
                tlb->fullmm = 1;
                __tlb_reset_range(tlb);
                tlb->freed_tables = 1;
        }

        tlb_flush_mmu(tlb);

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        tlb_batch_list_free(tlb);
#endif
        dec_tlb_flush_pending(tlb->mm);
}
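/*
 * For reference: the nested-flush detection above relies on the
 * tlb_flush_pending counter that tlb_gather_mmu() increments and
 * tlb_finish_mmu() decrements. Paraphrased from include/linux/mm_types.h:
 *
 *      static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
 *      {
 *              return atomic_read(&mm->tlb_flush_pending) > 1;
 *      }
 *
 * A count greater than one means another thread has a concurrent mmu_gather
 * on this mm, so the flush is forced over the widest range.
 */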