Commit c3fcf8a5daacf350f0632e1379414c01f34eeea3
Committed by
Linus Torvalds
1 parent
5b5c7120e2
Exists in
master
and in
7 other branches
[PATCH] page migration cleanup: extract try_to_unmap from migration functions
Extract try_to_unmap and rename remove_references -> move_mapping. try_to_unmap() may significantly change the page state by for example setting the dirty bit. It is therefore best to unmap in migrate_pages() before calling any migration functions. migrate_page_remove_references() will then only move the new page in place of the old page in the mapping. Rename the function to migrate_page_move_mapping(). This allows us to get rid of the special unmapping for the fallback path. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 1 changed file with 31 additions and 45 deletions Side-by-side Diff
mm/migrate.c
... | ... | @@ -166,15 +166,14 @@ |
166 | 166 | } |
167 | 167 | |
168 | 168 | /* |
169 | - * Remove references for a page and establish the new page with the correct | |
170 | - * basic settings to be able to stop accesses to the page. | |
169 | + * Replace the page in the mapping. | |
171 | 170 | * |
172 | 171 | * The number of remaining references must be: |
173 | 172 | * 1 for anonymous pages without a mapping |
174 | 173 | * 2 for pages with a mapping |
175 | 174 | * 3 for pages with a mapping and PagePrivate set. |
176 | 175 | */ |
177 | -static int migrate_page_remove_references(struct page *newpage, | |
176 | +static int migrate_page_move_mapping(struct page *newpage, | |
178 | 177 | struct page *page) |
179 | 178 | { |
180 | 179 | struct address_space *mapping = page_mapping(page); |
... | ... | @@ -183,35 +182,6 @@ |
183 | 182 | if (!mapping) |
184 | 183 | return -EAGAIN; |
185 | 184 | |
186 | - /* | |
187 | - * Establish swap ptes for anonymous pages or destroy pte | |
188 | - * maps for files. | |
189 | - * | |
190 | - * In order to reestablish file backed mappings the fault handlers | |
191 | - * will take the radix tree_lock which may then be used to stop | |
192 | - * processses from accessing this page until the new page is ready. | |
193 | - * | |
194 | - * A process accessing via a swap pte (an anonymous page) will take a | |
195 | - * page_lock on the old page which will block the process until the | |
196 | - * migration attempt is complete. At that time the PageSwapCache bit | |
197 | - * will be examined. If the page was migrated then the PageSwapCache | |
198 | - * bit will be clear and the operation to retrieve the page will be | |
199 | - * retried which will find the new page in the radix tree. Then a new | |
200 | - * direct mapping may be generated based on the radix tree contents. | |
201 | - * | |
202 | - * If the page was not migrated then the PageSwapCache bit | |
203 | - * is still set and the operation may continue. | |
204 | - */ | |
205 | - if (try_to_unmap(page, 1) == SWAP_FAIL) | |
206 | - /* A vma has VM_LOCKED set -> permanent failure */ | |
207 | - return -EPERM; | |
208 | - | |
209 | - /* | |
210 | - * Give up if we were unable to remove all mappings. | |
211 | - */ | |
212 | - if (page_mapcount(page)) | |
213 | - return -EAGAIN; | |
214 | - | |
215 | 185 | write_lock_irq(&mapping->tree_lock); |
216 | 186 | |
217 | 187 | radix_pointer = (struct page **)radix_tree_lookup_slot( |
... | ... | @@ -310,7 +280,7 @@ |
310 | 280 | |
311 | 281 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ |
312 | 282 | |
313 | - rc = migrate_page_remove_references(newpage, page); | |
283 | + rc = migrate_page_move_mapping(newpage, page); | |
314 | 284 | |
315 | 285 | if (rc) |
316 | 286 | return rc; |
... | ... | @@ -349,7 +319,7 @@ |
349 | 319 | |
350 | 320 | head = page_buffers(page); |
351 | 321 | |
352 | - rc = migrate_page_remove_references(newpage, page); | |
322 | + rc = migrate_page_move_mapping(newpage, page); | |
353 | 323 | |
354 | 324 | if (rc) |
355 | 325 | return rc; |
... | ... | @@ -482,6 +452,33 @@ |
482 | 452 | lock_page(newpage); |
483 | 453 | |
484 | 454 | /* |
455 | + * Establish swap ptes for anonymous pages or destroy pte | |
456 | + * maps for files. | |
457 | + * | |
458 | + * In order to reestablish file backed mappings the fault handlers | |
459 | + * will take the radix tree_lock which may then be used to stop | |
460 | + * processses from accessing this page until the new page is ready. | |
461 | + * | |
462 | + * A process accessing via a swap pte (an anonymous page) will take a | |
463 | + * page_lock on the old page which will block the process until the | |
464 | + * migration attempt is complete. At that time the PageSwapCache bit | |
465 | + * will be examined. If the page was migrated then the PageSwapCache | |
466 | + * bit will be clear and the operation to retrieve the page will be | |
467 | + * retried which will find the new page in the radix tree. Then a new | |
468 | + * direct mapping may be generated based on the radix tree contents. | |
469 | + * | |
470 | + * If the page was not migrated then the PageSwapCache bit | |
471 | + * is still set and the operation may continue. | |
472 | + */ | |
473 | + rc = -EPERM; | |
474 | + if (try_to_unmap(page, 1) == SWAP_FAIL) | |
475 | + /* A vma has VM_LOCKED set -> permanent failure */ | |
476 | + goto unlock_both; | |
477 | + | |
478 | + rc = -EAGAIN; | |
479 | + if (page_mapped(page)) | |
480 | + goto unlock_both; | |
481 | + /* | |
485 | 482 | * Pages are properly locked and writeback is complete. |
486 | 483 | * Try to migrate the page. |
487 | 484 | */ |
... | ... | @@ -500,17 +497,6 @@ |
500 | 497 | rc = mapping->a_ops->migratepage(newpage, page); |
501 | 498 | goto unlock_both; |
502 | 499 | } |
503 | - | |
504 | - /* Make sure the dirty bit is up to date */ | |
505 | - if (try_to_unmap(page, 1) == SWAP_FAIL) { | |
506 | - rc = -EPERM; | |
507 | - goto unlock_both; | |
508 | - } | |
509 | - | |
510 | - if (page_mapcount(page)) { | |
511 | - rc = -EAGAIN; | |
512 | - goto unlock_both; | |
513 | - } | |
514 | 500 | |
515 | 501 | /* |
516 | 502 | * Default handling if a filesystem does not provide |