Commit cb4b86ba47bb0937b71fb825b3ed88adf7a190f0
Committed by
Linus Torvalds
1 parent
6837765963
Exists in
master
and in
20 other branches
mm: add swap cache interface for swap reference
In a following patch, the usage of swap cache is recorded into swap_map. This patch makes the necessary interface changes for that. Two interfaces: - swapcache_prepare() - swapcache_free() are added for allocating/freeing a refcount from the swap cache to existing swap entries. The implementation itself is not changed by this patch. When adding swapcache_free(), memcg's hook code is moved under swapcache_free(). This is better than using scattered hooks. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Acked-by: Balbir Singh <balbir@in.ibm.com> Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Li Zefan <lizf@cn.fujitsu.com> Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com> Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 5 changed files with 33 additions and 9 deletions Side-by-side Diff
include/linux/swap.h
... | ... | @@ -282,8 +282,10 @@ |
282 | 282 | extern swp_entry_t get_swap_page(void); |
283 | 283 | extern swp_entry_t get_swap_page_of_type(int); |
284 | 284 | extern int swap_duplicate(swp_entry_t); |
285 | +extern int swapcache_prepare(swp_entry_t); | |
285 | 286 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
286 | 287 | extern void swap_free(swp_entry_t); |
288 | +extern void swapcache_free(swp_entry_t, struct page *page); | |
287 | 289 | extern int free_swap_and_cache(swp_entry_t); |
288 | 290 | extern int swap_type_of(dev_t, sector_t, struct block_device **); |
289 | 291 | extern unsigned int count_swap_pages(int, int); |
290 | 292 | |
... | ... | @@ -352,8 +354,13 @@ |
352 | 354 | |
353 | 355 | #define free_swap_and_cache(swp) is_migration_entry(swp) |
354 | 356 | #define swap_duplicate(swp) is_migration_entry(swp) |
357 | +#define swapcache_prepare(swp) is_migration_entry(swp) | |
355 | 358 | |
356 | 359 | static inline void swap_free(swp_entry_t swp) |
360 | +{ | |
361 | +} | |
362 | + | |
363 | +static inline void swapcache_free(swp_entry_t swp, struct page *page) | |
357 | 364 | { |
358 | 365 | } |
359 | 366 |
mm/shmem.c
mm/swap_state.c
... | ... | @@ -162,11 +162,11 @@ |
162 | 162 | return 1; |
163 | 163 | case -EEXIST: |
164 | 164 | /* Raced with "speculative" read_swap_cache_async */ |
165 | - swap_free(entry); | |
165 | + swapcache_free(entry, NULL); | |
166 | 166 | continue; |
167 | 167 | default: |
168 | 168 | /* -ENOMEM radix-tree allocation failure */ |
169 | - swap_free(entry); | |
169 | + swapcache_free(entry, NULL); | |
170 | 170 | return 0; |
171 | 171 | } |
172 | 172 | } |
... | ... | @@ -188,8 +188,7 @@ |
188 | 188 | __delete_from_swap_cache(page); |
189 | 189 | spin_unlock_irq(&swapper_space.tree_lock); |
190 | 190 | |
191 | - mem_cgroup_uncharge_swapcache(page, entry); | |
192 | - swap_free(entry); | |
191 | + swapcache_free(entry, page); | |
193 | 192 | page_cache_release(page); |
194 | 193 | } |
195 | 194 | |
... | ... | @@ -293,7 +292,7 @@ |
293 | 292 | /* |
294 | 293 | * Swap entry may have been freed since our caller observed it. |
295 | 294 | */ |
296 | - if (!swap_duplicate(entry)) | |
295 | + if (!swapcache_prepare(entry)) | |
297 | 296 | break; |
298 | 297 | |
299 | 298 | /* |
... | ... | @@ -317,7 +316,7 @@ |
317 | 316 | } |
318 | 317 | ClearPageSwapBacked(new_page); |
319 | 318 | __clear_page_locked(new_page); |
320 | - swap_free(entry); | |
319 | + swapcache_free(entry, NULL); | |
321 | 320 | } while (err != -ENOMEM); |
322 | 321 | |
323 | 322 | if (new_page) |
mm/swapfile.c
... | ... | @@ -510,6 +510,16 @@ |
510 | 510 | } |
511 | 511 | |
512 | 512 | /* |
513 | + * Called after dropping swapcache to decrease refcnt to swap entries. | |
514 | + */ | |
515 | +void swapcache_free(swp_entry_t entry, struct page *page) | |
516 | +{ | |
517 | + if (page) | |
518 | + mem_cgroup_uncharge_swapcache(page, entry); | |
519 | + return swap_free(entry); | |
520 | +} | |
521 | + | |
522 | +/* | |
513 | 523 | * How many references to page are currently swapped out? |
514 | 524 | */ |
515 | 525 | static inline int page_swapcount(struct page *page) |
... | ... | @@ -1978,6 +1988,15 @@ |
1978 | 1988 | printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); |
1979 | 1989 | goto out; |
1980 | 1990 | } |
1991 | + | |
1992 | +/* | |
1993 | + * Called when allocating swap cache for existing swap entry, | |
1994 | + */ | |
1995 | +int swapcache_prepare(swp_entry_t entry) | |
1996 | +{ | |
1997 | + return swap_duplicate(entry); | |
1998 | +} | |
1999 | + | |
1981 | 2000 | |
1982 | 2001 | struct swap_info_struct * |
1983 | 2002 | get_swap_info_struct(unsigned type) |
mm/vmscan.c
... | ... | @@ -470,8 +470,7 @@ |
470 | 470 | swp_entry_t swap = { .val = page_private(page) }; |
471 | 471 | __delete_from_swap_cache(page); |
472 | 472 | spin_unlock_irq(&mapping->tree_lock); |
473 | - mem_cgroup_uncharge_swapcache(page, swap); | |
474 | - swap_free(swap); | |
473 | + swapcache_free(swap, page); | |
475 | 474 | } else { |
476 | 475 | __remove_from_page_cache(page); |
477 | 476 | spin_unlock_irq(&mapping->tree_lock); |