Commit 1363c3cd8603a913a27e2995dccbd70d5312d8e6

Authored by Wolfgang Wander
Committed by Linus Torvalds
1 parent e7c8d5c995

[PATCH] Avoiding mmap fragmentation

Ingo recently introduced a great speedup for allocating new mmaps using the
free_area_cache pointer, which boosts the specweb SSL benchmark by 4-5% and
gives a huge performance increase in thread creation.

The downside of this patch is that it does lead to fragmentation in the
mmap-ed areas (visible via /proc/self/maps), such that some applications
that work fine under 2.4 kernels quickly run out of memory on any 2.6
kernel.

The problem is twofold:

  1) the free_area_cache is used to continue a search for memory where
     the last search ended.  Before the change, new areas were always
     searched for from the base address onwards.

     So now new small areas are scattered into holes of all sizes
     throughout the whole mmap-able region, whereas before small
     requests tended to fill the holes near the base, leaving the holes
     far from the base large and available for larger requests.

  2) the free_area_cache is also set to the location of the last
     munmap-ed area, so in a scenario where we allocate e.g. five regions
     of 1K each and then free regions 4, 2 and 3 in that order, the next
     request for 1K will be placed at the position of the old region 3,
     whereas before it would have been appended to the still active
     region 1, i.e. placed at the location of the old region 2.  Before
     we had one free region of 2K, now we only get two free regions of
     1K -> fragmentation.  (A small user-space sketch of this scenario
     follows this list.)
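
For illustration only (this is not part of the patch, and it is not the
leakme.c program mentioned below, just a separate toy demo): a minimal
user-space sketch of the scenario in item 2 could look like the code
below.  mmap() works at page granularity, so the 1K regions above become
one page each; the file name and the exact placement of the new mapping
are illustrative, since placement is a kernel heuristic of this era, not
a guarantee.

/* mmap-holes-demo.c -- hypothetical demo, not part of this patch.
 * Map five page-sized anonymous regions, unmap numbers 4, 2 and 3 in
 * that order, then map one more page and print where it lands.  With
 * the plain free_area_cache behaviour the new page tends to reuse old
 * region 3's slot (leaving two 1-page holes); with base-first or
 * cached_hole_size behaviour it tends to reuse old region 2's slot
 * (leaving regions 3+4 as one contiguous hole).
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *r[6], *fresh;
	int i;

	for (i = 1; i <= 5; i++) {
		r[i] = mmap(NULL, page, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		printf("region %d at %p\n", i, r[i]);
	}

	/* free regions 4, 2 and 3 -- in this order, as in the example */
	munmap(r[4], page);
	munmap(r[2], page);
	munmap(r[3], page);

	fresh = mmap(NULL, page, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	printf("new region at %p (old 2: %p, old 3: %p)\n",
	       fresh, r[2], r[3]);
	return 0;
}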

The patch addresses these issues by introducing yet another cache
descriptor, cached_hole_size, that contains the largest known hole size
below the current free_area_cache.  When a new request comes in, its size
is compared against cached_hole_size, and if the request could be filled
by a hole below free_area_cache, the search is started from the base
instead.
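
In condensed form, the bottom-up search then looks roughly like the
fragment below.  This is only a sketch of the body of
arch_get_unmapped_area() as changed by the mm/mmap.c hunk further down;
it is not compilable on its own and omits the hint-address fast path and
the per-architecture alignment handling.

	if (len > mm->cached_hole_size) {
		/* no hole below free_area_cache is known to fit: go on upwards */
		start_addr = addr = mm->free_area_cache;
	} else {
		/* a hole below the cache may fit: rescan from the base */
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		if (TASK_SIZE - len < addr) {
			/* wrap around once before giving up */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;	/* hint for next time */
			return addr;
		}
		/* remember the largest hole we stepped over */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
	}

The top-down (mmap_base-relative) variants keep a local largest_hole
instead and only publish it to mm->cached_hole_size once they commit to
an address, as the hugetlb hunks below show.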

The results look promising.  2.6.12-rc4 fragments quickly: my (earlier
posted) leakme.c test program terminates after 50000+ iterations with 96
distinct and fragmented maps in /proc/self/maps.  It does, however,
perform nicely (as expected) with thread creation: Ingo's test_str02
with 20000 threads requires 0.7s of system time.

Taking out Ingo's patch (un-patch available on request) by basically
deleting all mentions of free_area_cache from the kernel and always
starting the search for new memory at the respective bases, we observe:
leakme terminates successfully with 11 distinct, hardly fragmented areas
in /proc/self/maps, but thread creation is grindingly slow: 30+s(!) of
system time for Ingo's test_str02 with 20000 threads.

Now - drumroll ;-) - the appended patch works fine with leakme: it ends
with only 7 distinct areas in /proc/self/maps, and thread creation is
also sufficiently fast at 0.71s of system time for 20000 threads.

Signed-off-by: Wolfgang Wander <wwc@rentec.com>
Credit-to: "Richard Purdie" <rpurdie@rpsys.net>
Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu> (partly)
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 14 changed files with 147 additions and 30 deletions

arch/arm/mm/mmap.c
... ... @@ -73,7 +73,12 @@
73 73 (!vma || addr + len <= vma->vm_start))
74 74 return addr;
75 75 }
76   - start_addr = addr = mm->free_area_cache;
  76 + if (len > mm->cached_hole_size) {
  77 + start_addr = addr = mm->free_area_cache;
  78 + } else {
  79 + start_addr = addr = TASK_UNMAPPED_BASE;
  80 + mm->cached_hole_size = 0;
  81 + }
77 82  
78 83 full_search:
79 84 if (do_align)
... ... @@ -90,6 +95,7 @@
90 95 */
91 96 if (start_addr != TASK_UNMAPPED_BASE) {
92 97 start_addr = addr = TASK_UNMAPPED_BASE;
  98 + mm->cached_hole_size = 0;
93 99 goto full_search;
94 100 }
95 101 return -ENOMEM;
... ... @@ -101,6 +107,8 @@
101 107 mm->free_area_cache = addr + len;
102 108 return addr;
103 109 }
  110 + if (addr + mm->cached_hole_size < vma->vm_start)
  111 + mm->cached_hole_size = vma->vm_start - addr;
104 112 addr = vma->vm_end;
105 113 if (do_align)
106 114 addr = COLOUR_ALIGN(addr, pgoff);
arch/i386/mm/hugetlbpage.c
... ... @@ -140,7 +140,12 @@
140 140 struct vm_area_struct *vma;
141 141 unsigned long start_addr;
142 142  
143   - start_addr = mm->free_area_cache;
  143 + if (len > mm->cached_hole_size) {
  144 + start_addr = mm->free_area_cache;
  145 + } else {
  146 + start_addr = TASK_UNMAPPED_BASE;
  147 + mm->cached_hole_size = 0;
  148 + }
144 149  
145 150 full_search:
146 151 addr = ALIGN(start_addr, HPAGE_SIZE);
... ... @@ -154,6 +159,7 @@
154 159 */
155 160 if (start_addr != TASK_UNMAPPED_BASE) {
156 161 start_addr = TASK_UNMAPPED_BASE;
  162 + mm->cached_hole_size = 0;
157 163 goto full_search;
158 164 }
159 165 return -ENOMEM;
... ... @@ -162,6 +168,8 @@
162 168 mm->free_area_cache = addr + len;
163 169 return addr;
164 170 }
  171 + if (addr + mm->cached_hole_size < vma->vm_start)
  172 + mm->cached_hole_size = vma->vm_start - addr;
165 173 addr = ALIGN(vma->vm_end, HPAGE_SIZE);
166 174 }
167 175 }
168 176  
... ... @@ -173,12 +181,17 @@
173 181 struct mm_struct *mm = current->mm;
174 182 struct vm_area_struct *vma, *prev_vma;
175 183 unsigned long base = mm->mmap_base, addr = addr0;
  184 + unsigned long largest_hole = mm->cached_hole_size;
176 185 int first_time = 1;
177 186  
178 187 /* don't allow allocations above current base */
179 188 if (mm->free_area_cache > base)
180 189 mm->free_area_cache = base;
181 190  
  191 + if (len <= largest_hole) {
  192 + largest_hole = 0;
  193 + mm->free_area_cache = base;
  194 + }
182 195 try_again:
183 196 /* make sure it can fit in the remaining address space */
184 197 if (mm->free_area_cache < len)
185 198  
186 199  
187 200  
188 201  
... ... @@ -199,14 +212,22 @@
199 212 * vma->vm_start, use it:
200 213 */
201 214 if (addr + len <= vma->vm_start &&
202   - (!prev_vma || (addr >= prev_vma->vm_end)))
  215 + (!prev_vma || (addr >= prev_vma->vm_end))) {
203 216 /* remember the address as a hint for next time */
204   - return (mm->free_area_cache = addr);
205   - else
  217 + mm->cached_hole_size = largest_hole;
  218 + return (mm->free_area_cache = addr);
  219 + } else {
206 220 /* pull free_area_cache down to the first hole */
207   - if (mm->free_area_cache == vma->vm_end)
  221 + if (mm->free_area_cache == vma->vm_end) {
208 222 mm->free_area_cache = vma->vm_start;
  223 + mm->cached_hole_size = largest_hole;
  224 + }
  225 + }
209 226  
  227 + /* remember the largest hole we saw so far */
  228 + if (addr + largest_hole < vma->vm_start)
  229 + largest_hole = vma->vm_start - addr;
  230 +
210 231 /* try just below the current vma->vm_start */
211 232 addr = (vma->vm_start - len) & HPAGE_MASK;
212 233 } while (len <= vma->vm_start);
... ... @@ -218,6 +239,7 @@
218 239 */
219 240 if (first_time) {
220 241 mm->free_area_cache = base;
  242 + largest_hole = 0;
221 243 first_time = 0;
222 244 goto try_again;
223 245 }
... ... @@ -228,6 +250,7 @@
228 250 * allocations.
229 251 */
230 252 mm->free_area_cache = TASK_UNMAPPED_BASE;
  253 + mm->cached_hole_size = ~0UL;
231 254 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
232 255 len, pgoff, flags);
233 256  
... ... @@ -235,6 +258,7 @@
235 258 * Restore the topdown base:
236 259 */
237 260 mm->free_area_cache = base;
  261 + mm->cached_hole_size = ~0UL;
238 262  
239 263 return addr;
240 264 }
arch/ppc64/mm/hugetlbpage.c
... ... @@ -292,7 +292,12 @@
292 292 && !is_hugepage_only_range(mm, addr,len))
293 293 return addr;
294 294 }
295   - start_addr = addr = mm->free_area_cache;
  295 + if (len > mm->cached_hole_size) {
  296 + start_addr = addr = mm->free_area_cache;
  297 + } else {
  298 + start_addr = addr = TASK_UNMAPPED_BASE;
  299 + mm->cached_hole_size = 0;
  300 + }
296 301  
297 302 full_search:
298 303 vma = find_vma(mm, addr);
... ... @@ -316,6 +321,8 @@
316 321 mm->free_area_cache = addr + len;
317 322 return addr;
318 323 }
  324 + if (addr + mm->cached_hole_size < vma->vm_start)
  325 + mm->cached_hole_size = vma->vm_start - addr;
319 326 addr = vma->vm_end;
320 327 vma = vma->vm_next;
321 328 }
... ... @@ -323,6 +330,7 @@
323 330 /* Make sure we didn't miss any holes */
324 331 if (start_addr != TASK_UNMAPPED_BASE) {
325 332 start_addr = addr = TASK_UNMAPPED_BASE;
  333 + mm->cached_hole_size = 0;
326 334 goto full_search;
327 335 }
328 336 return -ENOMEM;
... ... @@ -344,6 +352,7 @@
344 352 struct vm_area_struct *vma, *prev_vma;
345 353 struct mm_struct *mm = current->mm;
346 354 unsigned long base = mm->mmap_base, addr = addr0;
  355 + unsigned long largest_hole = mm->cached_hole_size;
347 356 int first_time = 1;
348 357  
349 358 /* requested length too big for entire address space */
... ... @@ -364,6 +373,10 @@
364 373 return addr;
365 374 }
366 375  
  376 + if (len <= largest_hole) {
  377 + largest_hole = 0;
  378 + mm->free_area_cache = base;
  379 + }
367 380 try_again:
368 381 /* make sure it can fit in the remaining address space */
369 382 if (mm->free_area_cache < len)
370 383  
371 384  
372 385  
373 386  
... ... @@ -392,14 +405,22 @@
392 405 * vma->vm_start, use it:
393 406 */
394 407 if (addr+len <= vma->vm_start &&
395   - (!prev_vma || (addr >= prev_vma->vm_end)))
  408 + (!prev_vma || (addr >= prev_vma->vm_end))) {
396 409 /* remember the address as a hint for next time */
397   - return (mm->free_area_cache = addr);
398   - else
  410 + mm->cached_hole_size = largest_hole;
  411 + return (mm->free_area_cache = addr);
  412 + } else {
399 413 /* pull free_area_cache down to the first hole */
400   - if (mm->free_area_cache == vma->vm_end)
  414 + if (mm->free_area_cache == vma->vm_end) {
401 415 mm->free_area_cache = vma->vm_start;
  416 + mm->cached_hole_size = largest_hole;
  417 + }
  418 + }
402 419  
  420 + /* remember the largest hole we saw so far */
  421 + if (addr + largest_hole < vma->vm_start)
  422 + largest_hole = vma->vm_start - addr;
  423 +
403 424 /* try just below the current vma->vm_start */
404 425 addr = vma->vm_start-len;
405 426 } while (len <= vma->vm_start);
... ... @@ -411,6 +432,7 @@
411 432 */
412 433 if (first_time) {
413 434 mm->free_area_cache = base;
  435 + largest_hole = 0;
414 436 first_time = 0;
415 437 goto try_again;
416 438 }
417 439  
... ... @@ -421,11 +443,13 @@
421 443 * allocations.
422 444 */
423 445 mm->free_area_cache = TASK_UNMAPPED_BASE;
  446 + mm->cached_hole_size = ~0UL;
424 447 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
425 448 /*
426 449 * Restore the topdown base:
427 450 */
428 451 mm->free_area_cache = base;
  452 + mm->cached_hole_size = ~0UL;
429 453  
430 454 return addr;
431 455 }
arch/sh/kernel/sys_sh.c
... ... @@ -79,6 +79,10 @@
79 79 (!vma || addr + len <= vma->vm_start))
80 80 return addr;
81 81 }
  82 + if (len <= mm->cached_hole_size) {
  83 + mm->cached_hole_size = 0;
  84 + mm->free_area_cache = TASK_UNMAPPED_BASE;
  85 + }
82 86 if (flags & MAP_PRIVATE)
83 87 addr = PAGE_ALIGN(mm->free_area_cache);
84 88 else
... ... @@ -95,6 +99,7 @@
95 99 */
96 100 if (start_addr != TASK_UNMAPPED_BASE) {
97 101 start_addr = addr = TASK_UNMAPPED_BASE;
  102 + mm->cached_hole_size = 0;
98 103 goto full_search;
99 104 }
100 105 return -ENOMEM;
... ... @@ -106,6 +111,9 @@
106 111 mm->free_area_cache = addr + len;
107 112 return addr;
108 113 }
  114 + if (addr + mm->cached_hole_size < vma->vm_start)
  115 + mm->cached_hole_size = vma->vm_start - addr;
  116 +
109 117 addr = vma->vm_end;
110 118 if (!(flags & MAP_PRIVATE))
111 119 addr = COLOUR_ALIGN(addr);
arch/sparc64/kernel/sys_sparc.c
... ... @@ -84,6 +84,10 @@
84 84 return addr;
85 85 }
86 86  
  87 + if (len <= mm->cached_hole_size) {
  88 + mm->cached_hole_size = 0;
  89 + mm->free_area_cache = TASK_UNMAPPED_BASE;
  90 + }
87 91 start_addr = addr = mm->free_area_cache;
88 92  
89 93 task_size -= len;
... ... @@ -103,6 +107,7 @@
103 107 if (task_size < addr) {
104 108 if (start_addr != TASK_UNMAPPED_BASE) {
105 109 start_addr = addr = TASK_UNMAPPED_BASE;
  110 + mm->cached_hole_size = 0;
106 111 goto full_search;
107 112 }
108 113 return -ENOMEM;
... ... @@ -114,6 +119,9 @@
114 119 mm->free_area_cache = addr + len;
115 120 return addr;
116 121 }
  122 + if (addr + mm->cached_hole_size < vma->vm_start)
  123 + mm->cached_hole_size = vma->vm_start - addr;
  124 +
117 125 addr = vma->vm_end;
118 126 if (do_color_align)
119 127 addr = COLOUR_ALIGN(addr, pgoff);
arch/x86_64/ia32/ia32_aout.c
... ... @@ -312,6 +312,7 @@
312 312 current->mm->brk = ex.a_bss +
313 313 (current->mm->start_brk = N_BSSADDR(ex));
314 314 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
  315 + current->mm->cached_hole_size = 0;
315 316  
316 317 set_mm_counter(current->mm, rss, 0);
317 318 current->mm->mmap = NULL;
arch/x86_64/kernel/sys_x86_64.c
... ... @@ -105,6 +105,11 @@
105 105 (!vma || addr + len <= vma->vm_start))
106 106 return addr;
107 107 }
  108 + if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
  109 + && len <= mm->cached_hole_size) {
  110 + mm->cached_hole_size = 0;
  111 + mm->free_area_cache = begin;
  112 + }
108 113 addr = mm->free_area_cache;
109 114 if (addr < begin)
110 115 addr = begin;
... ... @@ -120,6 +125,7 @@
120 125 */
121 126 if (start_addr != begin) {
122 127 start_addr = addr = begin;
  128 + mm->cached_hole_size = 0;
123 129 goto full_search;
124 130 }
125 131 return -ENOMEM;
... ... @@ -131,6 +137,9 @@
131 137 mm->free_area_cache = addr + len;
132 138 return addr;
133 139 }
  140 + if (addr + mm->cached_hole_size < vma->vm_start)
  141 + mm->cached_hole_size = vma->vm_start - addr;
  142 +
134 143 addr = vma->vm_end;
135 144 }
136 145 }
fs/binfmt_aout.c
... ... @@ -316,6 +316,7 @@
316 316 current->mm->brk = ex.a_bss +
317 317 (current->mm->start_brk = N_BSSADDR(ex));
318 318 current->mm->free_area_cache = current->mm->mmap_base;
  319 + current->mm->cached_hole_size = 0;
319 320  
320 321 set_mm_counter(current->mm, rss, 0);
321 322 current->mm->mmap = NULL;
fs/binfmt_elf.c
... ... @@ -775,6 +775,7 @@
775 775 change some of these later */
776 776 set_mm_counter(current->mm, rss, 0);
777 777 current->mm->free_area_cache = current->mm->mmap_base;
  778 + current->mm->cached_hole_size = 0;
778 779 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
779 780 executable_stack);
780 781 if (retval < 0) {
fs/hugetlbfs/inode.c
... ... @@ -122,6 +122,9 @@
122 122  
123 123 start_addr = mm->free_area_cache;
124 124  
  125 + if (len <= mm->cached_hole_size)
  126 + start_addr = TASK_UNMAPPED_BASE;
  127 +
125 128 full_search:
126 129 addr = ALIGN(start_addr, HPAGE_SIZE);
127 130  
include/linux/sched.h
... ... @@ -201,8 +201,8 @@
201 201 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
202 202 unsigned long len, unsigned long pgoff,
203 203 unsigned long flags);
204   -extern void arch_unmap_area(struct vm_area_struct *area);
205   -extern void arch_unmap_area_topdown(struct vm_area_struct *area);
  204 +extern void arch_unmap_area(struct mm_struct *, unsigned long);
  205 +extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
206 206  
207 207 #define set_mm_counter(mm, member, value) (mm)->_##member = (value)
208 208 #define get_mm_counter(mm, member) ((mm)->_##member)
... ... @@ -218,9 +218,10 @@
218 218 unsigned long (*get_unmapped_area) (struct file *filp,
219 219 unsigned long addr, unsigned long len,
220 220 unsigned long pgoff, unsigned long flags);
221   - void (*unmap_area) (struct vm_area_struct *area);
222   - unsigned long mmap_base; /* base of mmap area */
223   - unsigned long free_area_cache; /* first hole */
  221 + void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
  222 + unsigned long mmap_base; /* base of mmap area */
  223 + unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
  224 + unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
224 225 pgd_t * pgd;
225 226 atomic_t mm_users; /* How many users with user space? */
226 227 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
kernel/fork.c
... ... @@ -194,6 +194,7 @@
194 194 mm->mmap = NULL;
195 195 mm->mmap_cache = NULL;
196 196 mm->free_area_cache = oldmm->mmap_base;
  197 + mm->cached_hole_size = ~0UL;
197 198 mm->map_count = 0;
198 199 set_mm_counter(mm, rss, 0);
199 200 set_mm_counter(mm, anon_rss, 0);
... ... @@ -322,6 +323,7 @@
322 323 mm->ioctx_list = NULL;
323 324 mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
324 325 mm->free_area_cache = TASK_UNMAPPED_BASE;
  326 + mm->cached_hole_size = ~0UL;
325 327  
326 328 if (likely(!mm_alloc_pgd(mm))) {
327 329 mm->def_flags = 0;
mm/mmap.c
... ... @@ -1175,7 +1175,12 @@
1175 1175 (!vma || addr + len <= vma->vm_start))
1176 1176 return addr;
1177 1177 }
1178   - start_addr = addr = mm->free_area_cache;
  1178 + if (len > mm->cached_hole_size) {
  1179 + start_addr = addr = mm->free_area_cache;
  1180 + } else {
  1181 + start_addr = addr = TASK_UNMAPPED_BASE;
  1182 + mm->cached_hole_size = 0;
  1183 + }
1179 1184  
1180 1185 full_search:
1181 1186 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
... ... @@ -1186,7 +1191,9 @@
1186 1191 * some holes.
1187 1192 */
1188 1193 if (start_addr != TASK_UNMAPPED_BASE) {
1189   - start_addr = addr = TASK_UNMAPPED_BASE;
  1194 + addr = TASK_UNMAPPED_BASE;
  1195 + start_addr = addr;
  1196 + mm->cached_hole_size = 0;
1190 1197 goto full_search;
1191 1198 }
1192 1199 return -ENOMEM;
1193 1200  
1194 1201  
... ... @@ -1198,19 +1205,22 @@
1198 1205 mm->free_area_cache = addr + len;
1199 1206 return addr;
1200 1207 }
  1208 + if (addr + mm->cached_hole_size < vma->vm_start)
  1209 + mm->cached_hole_size = vma->vm_start - addr;
1201 1210 addr = vma->vm_end;
1202 1211 }
1203 1212 }
1204 1213 #endif
1205 1214  
1206   -void arch_unmap_area(struct vm_area_struct *area)
  1215 +void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1207 1216 {
1208 1217 /*
1209 1218 * Is this a new hole at the lowest possible address?
1210 1219 */
1211   - if (area->vm_start >= TASK_UNMAPPED_BASE &&
1212   - area->vm_start < area->vm_mm->free_area_cache)
1213   - area->vm_mm->free_area_cache = area->vm_start;
  1220 + if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
  1221 + mm->free_area_cache = addr;
  1222 + mm->cached_hole_size = ~0UL;
  1223 + }
1214 1224 }
1215 1225  
1216 1226 /*
... ... @@ -1240,6 +1250,12 @@
1240 1250 return addr;
1241 1251 }
1242 1252  
  1253 + /* check if free_area_cache is useful for us */
  1254 + if (len <= mm->cached_hole_size) {
  1255 + mm->cached_hole_size = 0;
  1256 + mm->free_area_cache = mm->mmap_base;
  1257 + }
  1258 +
1243 1259 /* either no address requested or can't fit in requested address hole */
1244 1260 addr = mm->free_area_cache;
1245 1261  
... ... @@ -1264,6 +1280,10 @@
1264 1280 /* remember the address as a hint for next time */
1265 1281 return (mm->free_area_cache = addr);
1266 1282  
  1283 + /* remember the largest hole we saw so far */
  1284 + if (addr + mm->cached_hole_size < vma->vm_start)
  1285 + mm->cached_hole_size = vma->vm_start - addr;
  1286 +
1267 1287 /* try just below the current vma->vm_start */
1268 1288 addr = vma->vm_start-len;
1269 1289 } while (len < vma->vm_start);
1270 1290  
1271 1291  
1272 1292  
1273 1293  
... ... @@ -1274,28 +1294,30 @@
1274 1294 * can happen with large stack limits and large mmap()
1275 1295 * allocations.
1276 1296 */
1277   - mm->free_area_cache = TASK_UNMAPPED_BASE;
  1297 + mm->cached_hole_size = ~0UL;
  1298 + mm->free_area_cache = TASK_UNMAPPED_BASE;
1278 1299 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
1279 1300 /*
1280 1301 * Restore the topdown base:
1281 1302 */
1282 1303 mm->free_area_cache = mm->mmap_base;
  1304 + mm->cached_hole_size = ~0UL;
1283 1305  
1284 1306 return addr;
1285 1307 }
1286 1308 #endif
1287 1309  
1288   -void arch_unmap_area_topdown(struct vm_area_struct *area)
  1310 +void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1289 1311 {
1290 1312 /*
1291 1313 * Is this a new hole at the highest possible address?
1292 1314 */
1293   - if (area->vm_end > area->vm_mm->free_area_cache)
1294   - area->vm_mm->free_area_cache = area->vm_end;
  1315 + if (addr > mm->free_area_cache)
  1316 + mm->free_area_cache = addr;
1295 1317  
1296 1318 /* dont allow allocations above current base */
1297   - if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base)
1298   - area->vm_mm->free_area_cache = area->vm_mm->mmap_base;
  1319 + if (mm->free_area_cache > mm->mmap_base)
  1320 + mm->free_area_cache = mm->mmap_base;
1299 1321 }
1300 1322  
1301 1323 unsigned long
... ... @@ -1595,7 +1617,6 @@
1595 1617 if (area->vm_flags & VM_LOCKED)
1596 1618 area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
1597 1619 vm_stat_unaccount(area);
1598   - area->vm_mm->unmap_area(area);
1599 1620 remove_vm_struct(area);
1600 1621 }
1601 1622  
... ... @@ -1649,6 +1670,7 @@
1649 1670 {
1650 1671 struct vm_area_struct **insertion_point;
1651 1672 struct vm_area_struct *tail_vma = NULL;
  1673 + unsigned long addr;
1652 1674  
1653 1675 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1654 1676 do {
... ... @@ -1659,6 +1681,11 @@
1659 1681 } while (vma && vma->vm_start < end);
1660 1682 *insertion_point = vma;
1661 1683 tail_vma->vm_next = NULL;
  1684 + if (mm->unmap_area == arch_unmap_area)
  1685 + addr = prev ? prev->vm_end : mm->mmap_base;
  1686 + else
  1687 + addr = vma ? vma->vm_start : mm->mmap_base;
  1688 + mm->unmap_area(mm, addr);
1662 1689 mm->mmap_cache = NULL; /* Kill the cache. */
1663 1690 }
1664 1691  
mm/nommu.c
... ... @@ -1067,7 +1067,7 @@
1067 1067 return -ENOMEM;
1068 1068 }
1069 1069  
1070   -void arch_unmap_area(struct vm_area_struct *area)
  1070 +void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1071 1071 {
1072 1072 }
1073 1073