Commit 1466abf2d03207b42980ddf8cde1fba17b7cd301

Authored by Jeff Dike
Committed by Linus Torvalds
1 parent 512b6fb1c1

uml: clean up tlb flush path

Tidy the tlb flushing code.

With tt mode gone, there is no reason to keep the ability to swap in
an operation executor which calls do_mmap, do_mprotect, and do_munmap
directly, so the flushing code can now call do_ops itself rather than
calling through a function pointer that it is given.
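
To make the change concrete, here are the old extern declaration
removed from tlb.h and the new signature from tlb.c, excerpted from
the diff below:

    /* Before: the caller had to supply the operation executor. */
    extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                                 unsigned long end_addr, int force,
                                 int (*do_ops)(struct mm_context *,
                                               struct host_vm_op *, int, int,
                                               void **));

    /* After: do_ops is static in tlb.c and is invoked directly. */
    void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                          unsigned long end_addr, int force);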

There was a large amount of data passed from function to function,
used only at the lowest level and never changed along the way.  This
state is now encapsulated in a structure which is initialized at the
top layer and passed down.  This simplifies the code, cuts down the
parameter passing, and saves stack space.
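
The effect is easiest to see in add_munmap's signature, excerpted from
the diff below; everything that used to be threaded through the page
table walk as separate arguments now travels in a single struct
host_vm_change, built once at the top by INIT_HVC(mm, force):

    /* Before: five pieces of bookkeeping passed through every level. */
    static int add_munmap(unsigned long addr, unsigned long len,
                          struct host_vm_op *ops, int *index, int last_filled,
                          struct mm_context *mmu, void **flush,
                          int (*do_ops)(struct mm_context *,
                                        struct host_vm_op *, int, int,
                                        void **));

    /* After: one pointer to the encapsulated state. */
    static int add_munmap(unsigned long addr, unsigned long len,
                          struct host_vm_change *hvc);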

A somewhat more subtle change is in the meaning of the current
operation index.  It used to start at -1 and was pre-incremented when
adding an operation.  It now starts at 0 and is post-incremented, with
the associated +/- 1 adjustments on comparisons.
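
Concretely, the flush-and-append sequence in the add_* functions
changes as follows (excerpted from add_munmap in the diff below):

    /* Before: index starts at -1, slots are filled by pre-increment,
     * and the "buffer full" test compares against the last valid index. */
    if (*index == last_filled) {
            ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
            *index = -1;
    }
    ops[++*index] = ...;

    /* After: index starts at 0, slots are filled by post-increment,
     * and the "buffer full" test compares against the array size. */
    if (hvc->index == ARRAY_SIZE(hvc->ops)) {
            ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
            hvc->index = 0;
    }
    hvc->ops[hvc->index++] = ...;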

In addition, tlb.h contained a couple of declarations which had no
users outside of tlb.c, so they could be moved or deleted.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 2 changed files with 127 additions and 158 deletions

arch/um/include/tlb.h
... ... @@ -8,34 +8,7 @@
8 8  
9 9 #include "um_mmu.h"
10 10  
11   -struct host_vm_op {
12   - enum { NONE, MMAP, MUNMAP, MPROTECT } type;
13   - union {
14   - struct {
15   - unsigned long addr;
16   - unsigned long len;
17   - unsigned int prot;
18   - int fd;
19   - __u64 offset;
20   - } mmap;
21   - struct {
22   - unsigned long addr;
23   - unsigned long len;
24   - } munmap;
25   - struct {
26   - unsigned long addr;
27   - unsigned long len;
28   - unsigned int prot;
29   - } mprotect;
30   - } u;
31   -};
32   -
33 11 extern void force_flush_all(void);
34   -extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
35   - unsigned long end_addr, int force,
36   - int (*do_ops)(struct mm_context *,
37   - struct host_vm_op *, int, int,
38   - void **));
39 12 extern int flush_tlb_kernel_range_common(unsigned long start,
40 13 unsigned long end);
41 14  
arch/um/kernel/tlb.c
... ... @@ -12,19 +12,85 @@
12 12 #include "skas.h"
13 13 #include "tlb.h"
14 14  
  15 +struct host_vm_change {
  16 + struct host_vm_op {
  17 + enum { NONE, MMAP, MUNMAP, MPROTECT } type;
  18 + union {
  19 + struct {
  20 + unsigned long addr;
  21 + unsigned long len;
  22 + unsigned int prot;
  23 + int fd;
  24 + __u64 offset;
  25 + } mmap;
  26 + struct {
  27 + unsigned long addr;
  28 + unsigned long len;
  29 + } munmap;
  30 + struct {
  31 + unsigned long addr;
  32 + unsigned long len;
  33 + unsigned int prot;
  34 + } mprotect;
  35 + } u;
  36 + } ops[1];
  37 + int index;
  38 + struct mm_id *id;
  39 + void *data;
  40 + int force;
  41 +};
  42 +
  43 +#define INIT_HVC(mm, force) \
  44 + ((struct host_vm_change) \
  45 + { .ops = { { .type = NONE } }, \
  46 + .id = &mm->context.id, \
  47 + .data = NULL, \
  48 + .index = 0, \
  49 + .force = force })
  50 +
  51 +static int do_ops(struct host_vm_change *hvc, int end,
  52 + int finished)
  53 +{
  54 + struct host_vm_op *op;
  55 + int i, ret = 0;
  56 +
  57 + for (i = 0; i < end && !ret; i++) {
  58 + op = &hvc->ops[i];
  59 + switch(op->type) {
  60 + case MMAP:
  61 + ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
  62 + op->u.mmap.prot, op->u.mmap.fd,
  63 + op->u.mmap.offset, finished, &hvc->data);
  64 + break;
  65 + case MUNMAP:
  66 + ret = unmap(hvc->id, op->u.munmap.addr,
  67 + op->u.munmap.len, finished, &hvc->data);
  68 + break;
  69 + case MPROTECT:
  70 + ret = protect(hvc->id, op->u.mprotect.addr,
  71 + op->u.mprotect.len, op->u.mprotect.prot,
  72 + finished, &hvc->data);
  73 + break;
  74 + default:
  75 + printk(KERN_ERR "Unknown op type %d in do_ops\n",
  76 + op->type);
  77 + break;
  78 + }
  79 + }
  80 +
  81 + return ret;
  82 +}
  83 +
15 84 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
16   - unsigned int prot, struct host_vm_op *ops, int *index,
17   - int last_filled, struct mm_context *mmu, void **flush,
18   - int (*do_ops)(struct mm_context *, struct host_vm_op *,
19   - int, int, void **))
  85 + unsigned int prot, struct host_vm_change *hvc)
20 86 {
21 87 __u64 offset;
22 88 struct host_vm_op *last;
23 89 int fd, ret = 0;
24 90  
25 91 fd = phys_mapping(phys, &offset);
26   - if (*index != -1) {
27   - last = &ops[*index];
  92 + if (hvc->index != 0) {
  93 + last = &hvc->ops[hvc->index - 1];
28 94 if ((last->type == MMAP) &&
29 95 (last->u.mmap.addr + last->u.mmap.len == virt) &&
30 96 (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
... ... @@ -34,33 +100,30 @@
34 100 }
35 101 }
36 102  
37   - if (*index == last_filled) {
38   - ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
39   - *index = -1;
  103 + if (hvc->index == ARRAY_SIZE(hvc->ops)) {
  104 + ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
  105 + hvc->index = 0;
40 106 }
41 107  
42   - ops[++*index] = ((struct host_vm_op) { .type = MMAP,
43   - .u = { .mmap = {
44   - .addr = virt,
45   - .len = len,
46   - .prot = prot,
47   - .fd = fd,
48   - .offset = offset }
  108 + hvc->ops[hvc->index++] = ((struct host_vm_op)
  109 + { .type = MMAP,
  110 + .u = { .mmap = { .addr = virt,
  111 + .len = len,
  112 + .prot = prot,
  113 + .fd = fd,
  114 + .offset = offset }
49 115 } });
50 116 return ret;
51 117 }
52 118  
53 119 static int add_munmap(unsigned long addr, unsigned long len,
54   - struct host_vm_op *ops, int *index, int last_filled,
55   - struct mm_context *mmu, void **flush,
56   - int (*do_ops)(struct mm_context *, struct host_vm_op *,
57   - int, int, void **))
  120 + struct host_vm_change *hvc)
58 121 {
59 122 struct host_vm_op *last;
60 123 int ret = 0;
61 124  
62   - if (*index != -1) {
63   - last = &ops[*index];
  125 + if (hvc->index != 0) {
  126 + last = &hvc->ops[hvc->index - 1];
64 127 if ((last->type == MUNMAP) &&
65 128 (last->u.munmap.addr + last->u.mmap.len == addr)) {
66 129 last->u.munmap.len += len;
... ... @@ -68,29 +131,26 @@
68 131 }
69 132 }
70 133  
71   - if (*index == last_filled) {
72   - ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
73   - *index = -1;
  134 + if (hvc->index == ARRAY_SIZE(hvc->ops)) {
  135 + ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
  136 + hvc->index = 0;
74 137 }
75 138  
76   - ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
77   - .u = { .munmap = {
78   - .addr = addr,
79   - .len = len } } });
  139 + hvc->ops[hvc->index++] = ((struct host_vm_op)
  140 + { .type = MUNMAP,
  141 + .u = { .munmap = { .addr = addr,
  142 + .len = len } } });
80 143 return ret;
81 144 }
82 145  
83 146 static int add_mprotect(unsigned long addr, unsigned long len,
84   - unsigned int prot, struct host_vm_op *ops, int *index,
85   - int last_filled, struct mm_context *mmu, void **flush,
86   - int (*do_ops)(struct mm_context *, struct host_vm_op *,
87   - int, int, void **))
  147 + unsigned int prot, struct host_vm_change *hvc)
88 148 {
89 149 struct host_vm_op *last;
90 150 int ret = 0;
91 151  
92   - if (*index != -1) {
93   - last = &ops[*index];
  152 + if (hvc->index != 0) {
  153 + last = &hvc->ops[hvc->index - 1];
94 154 if ((last->type == MPROTECT) &&
95 155 (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
96 156 (last->u.mprotect.prot == prot)) {
... ... @@ -99,28 +159,24 @@
99 159 }
100 160 }
101 161  
102   - if (*index == last_filled) {
103   - ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
104   - *index = -1;
  162 + if (hvc->index == ARRAY_SIZE(hvc->ops)) {
  163 + ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
  164 + hvc->index = 0;
105 165 }
106 166  
107   - ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
108   - .u = { .mprotect = {
109   - .addr = addr,
110   - .len = len,
111   - .prot = prot } } });
  167 + hvc->ops[hvc->index++] = ((struct host_vm_op)
  168 + { .type = MPROTECT,
  169 + .u = { .mprotect = { .addr = addr,
  170 + .len = len,
  171 + .prot = prot } } });
112 172 return ret;
113 173 }
114 174  
115 175 #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
116 176  
117 177 static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
118   - unsigned long end, struct host_vm_op *ops,
119   - int last_op, int *op_index, int force,
120   - struct mm_context *mmu, void **flush,
121   - int (*do_ops)(struct mm_context *,
122   - struct host_vm_op *, int, int,
123   - void **))
  178 + unsigned long end,
  179 + struct host_vm_change *hvc)
124 180 {
125 181 pte_t *pte;
126 182 int r, w, x, prot, ret = 0;
... ... @@ -138,29 +194,22 @@
138 194 }
139 195 prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
140 196 (x ? UM_PROT_EXEC : 0));
141   - if (force || pte_newpage(*pte)) {
  197 + if (hvc->force || pte_newpage(*pte)) {
142 198 if (pte_present(*pte))
143 199 ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
144   - PAGE_SIZE, prot, ops, op_index,
145   - last_op, mmu, flush, do_ops);
146   - else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
147   - last_op, mmu, flush, do_ops);
  200 + PAGE_SIZE, prot, hvc);
  201 + else ret = add_munmap(addr, PAGE_SIZE, hvc);
148 202 }
149 203 else if (pte_newprot(*pte))
150   - ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
151   - last_op, mmu, flush, do_ops);
  204 + ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
152 205 *pte = pte_mkuptodate(*pte);
153 206 } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
154 207 return ret;
155 208 }
156 209  
157 210 static inline int update_pmd_range(pud_t *pud, unsigned long addr,
158   - unsigned long end, struct host_vm_op *ops,
159   - int last_op, int *op_index, int force,
160   - struct mm_context *mmu, void **flush,
161   - int (*do_ops)(struct mm_context *,
162   - struct host_vm_op *, int, int,
163   - void **))
  211 + unsigned long end,
  212 + struct host_vm_change *hvc)
164 213 {
165 214 pmd_t *pmd;
166 215 unsigned long next;
... ... @@ -170,27 +219,19 @@
170 219 do {
171 220 next = pmd_addr_end(addr, end);
172 221 if (!pmd_present(*pmd)) {
173   - if (force || pmd_newpage(*pmd)) {
174   - ret = add_munmap(addr, next - addr, ops,
175   - op_index, last_op, mmu,
176   - flush, do_ops);
  222 + if (hvc->force || pmd_newpage(*pmd)) {
  223 + ret = add_munmap(addr, next - addr, hvc);
177 224 pmd_mkuptodate(*pmd);
178 225 }
179 226 }
180   - else ret = update_pte_range(pmd, addr, next, ops, last_op,
181   - op_index, force, mmu, flush,
182   - do_ops);
  227 + else ret = update_pte_range(pmd, addr, next, hvc);
183 228 } while (pmd++, addr = next, ((addr != end) && !ret));
184 229 return ret;
185 230 }
186 231  
187 232 static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
188   - unsigned long end, struct host_vm_op *ops,
189   - int last_op, int *op_index, int force,
190   - struct mm_context *mmu, void **flush,
191   - int (*do_ops)(struct mm_context *,
192   - struct host_vm_op *, int, int,
193   - void **))
  233 + unsigned long end,
  234 + struct host_vm_change *hvc)
194 235 {
195 236 pud_t *pud;
196 237 unsigned long next;
... ... @@ -200,51 +241,39 @@
200 241 do {
201 242 next = pud_addr_end(addr, end);
202 243 if (!pud_present(*pud)) {
203   - if (force || pud_newpage(*pud)) {
204   - ret = add_munmap(addr, next - addr, ops,
205   - op_index, last_op, mmu,
206   - flush, do_ops);
  244 + if (hvc->force || pud_newpage(*pud)) {
  245 + ret = add_munmap(addr, next - addr, hvc);
207 246 pud_mkuptodate(*pud);
208 247 }
209 248 }
210   - else ret = update_pmd_range(pud, addr, next, ops, last_op,
211   - op_index, force, mmu, flush,
212   - do_ops);
  249 + else ret = update_pmd_range(pud, addr, next, hvc);
213 250 } while (pud++, addr = next, ((addr != end) && !ret));
214 251 return ret;
215 252 }
216 253  
217 254 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
218   - unsigned long end_addr, int force,
219   - int (*do_ops)(struct mm_context *, struct host_vm_op *,
220   - int, int, void **))
  255 + unsigned long end_addr, int force)
221 256 {
222 257 pgd_t *pgd;
223   - struct mm_context *mmu = &mm->context;
224   - struct host_vm_op ops[1];
  258 + struct host_vm_change hvc;
225 259 unsigned long addr = start_addr, next;
226   - int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
227   - void *flush = NULL;
  260 + int ret = 0;
228 261  
229   - ops[0].type = NONE;
  262 + hvc = INIT_HVC(mm, force);
230 263 pgd = pgd_offset(mm, addr);
231 264 do {
232 265 next = pgd_addr_end(addr, end_addr);
233 266 if (!pgd_present(*pgd)) {
234 267 if (force || pgd_newpage(*pgd)) {
235   - ret = add_munmap(addr, next - addr, ops,
236   - &op_index, last_op, mmu,
237   - &flush, do_ops);
  268 + ret = add_munmap(addr, next - addr, &hvc);
238 269 pgd_mkuptodate(*pgd);
239 270 }
240 271 }
241   - else ret = update_pud_range(pgd, addr, next, ops, last_op,
242   - &op_index, force, mmu, &flush,
243   - do_ops);
  272 + else ret = update_pud_range(pgd, addr, next, &hvc);
244 273 } while (pgd++, addr = next, ((addr != end_addr) && !ret));
245 274  
246 275 if (!ret)
247   - ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
  276 + ret = do_ops(&hvc, hvc.index, 1);
248 277  
249 278 /* This is not an else because ret is modified above */
250 279 if (ret) {
... ... @@ -453,46 +482,13 @@
453 482 flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
454 483 }
455 484  
456   -static int do_ops(struct mm_context *mmu, struct host_vm_op *ops, int last,
457   - int finished, void **flush)
458   -{
459   - struct host_vm_op *op;
460   - int i, ret = 0;
461   -
462   - for (i = 0; i <= last && !ret; i++) {
463   - op = &ops[i];
464   - switch(op->type) {
465   - case MMAP:
466   - ret = map(&mmu->id, op->u.mmap.addr, op->u.mmap.len,
467   - op->u.mmap.prot, op->u.mmap.fd,
468   - op->u.mmap.offset, finished, flush);
469   - break;
470   - case MUNMAP:
471   - ret = unmap(&mmu->id, op->u.munmap.addr,
472   - op->u.munmap.len, finished, flush);
473   - break;
474   - case MPROTECT:
475   - ret = protect(&mmu->id, op->u.mprotect.addr,
476   - op->u.mprotect.len, op->u.mprotect.prot,
477   - finished, flush);
478   - break;
479   - default:
480   - printk(KERN_ERR "Unknown op type %d in do_ops\n",
481   - op->type);
482   - break;
483   - }
484   - }
485   -
486   - return ret;
487   -}
488   -
489 485 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
490 486 unsigned long end_addr, int force)
491 487 {
492 488 if (!proc_mm && (end_addr > CONFIG_STUB_START))
493 489 end_addr = CONFIG_STUB_START;
494 490  
495   - fix_range_common(mm, start_addr, end_addr, force, do_ops);
  491 + fix_range_common(mm, start_addr, end_addr, force);
496 492 }
497 493  
498 494 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,