Blame view

mm/swap_state.c 12.9 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
5
6
7
8
  /*
   *  linux/mm/swap_state.c
   *
   *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   *  Swap reorganised 29.12.95, Stephen Tweedie
   *
   *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
9
  #include <linux/mm.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
10
  #include <linux/gfp.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
11
12
  #include <linux/kernel_stat.h>
  #include <linux/swap.h>
46017e954   Hugh Dickins   swapin_readahead:...
13
  #include <linux/swapops.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
14
15
  #include <linux/init.h>
  #include <linux/pagemap.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
16
  #include <linux/backing-dev.h>
3fb5c298b   Christian Ehrhardt   swap: allow swap ...
17
  #include <linux/blkdev.h>
c484d4104   Hugh Dickins   [PATCH] mm: free_...
18
  #include <linux/pagevec.h>
b20a35035   Christoph Lameter   [PATCH] page migr...
19
  #include <linux/migrate.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
20
21
22
23
24
  
  #include <asm/pgtable.h>
  
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

/*
 * Pseudo backing device for swapcache pages: its capabilities exempt them
 * from dirty accounting and flusher-thread writeback (writeout goes
 * through swap_writepage() instead).
 */
static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};
/*
 * One swap address_space per swap file (indexed by swap type), so that the
 * radix tree and its tree_lock are not contended across all swap devices;
 * see swap_address_space() for the entry -> address_space mapping.
 */
struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
47
48
49
50
51
52
53
54
  
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

/*
 * Lifetime swapcache counters, reported by show_swap_cache_info().
 * Incremented with a plain ++ (no locking), so the values are
 * approximate under concurrency — they are statistics only.
 */
static struct {
	unsigned long add_total;	/* pages added to the swap cache */
	unsigned long del_total;	/* pages deleted from the swap cache */
	unsigned long find_success;	/* lookups that found a page */
	unsigned long find_total;	/* total lookups */
} swap_cache_info;
33806f06d   Shaohua Li   swap: make each s...
56
57
58
59
60
61
62
63
64
  unsigned long total_swapcache_pages(void)
  {
  	int i;
  	unsigned long ret = 0;
  
  	for (i = 0; i < MAX_SWAPFILES; i++)
  		ret += swapper_spaces[i].nrpages;
  	return ret;
  }
579f82901   Shaohua Li   swap: add a simpl...
65
/*
 * Count of recent readahead pages that were actually faulted in
 * (bumped in lookup_swap_cache(), consumed by swapin_nr_pages()
 * to size the next readahead window). Seeded at 4.
 */
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
66
67
/*
 * Dump swapcache statistics and free/total swap sizes to the kernel log.
 */
void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	/* << (PAGE_SHIFT - 10) converts a page count to kilobytes. */
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
  
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 *
 * The page must be locked, not yet in the swap cache, and swap-backed
 * (enforced by the VM_BUG_ONs below); the caller must already have done
 * a radix_tree preload so the insert cannot need to allocate under the
 * tree_lock. Returns 0 on success or a negative errno, undoing the page
 * state changes on failure.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	/* Take a cache reference and tag the page before it is visible. */
	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so the insert cannot
		 * find an existing entry and -EEXIST is impossible here.
		 */
		VM_BUG_ON(error == -EEXIST);
		/* Roll back the page state set up above. */
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
  
  
  int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
  {
  	int error;
5e4c0d974   Jan Kara   lib/radix-tree.c:...
127
  	error = radix_tree_maybe_preload(gfp_mask);
35c754d79   Balbir Singh   memory controller...
128
  	if (!error) {
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
129
  		error = __add_to_swap_cache(page, entry);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
130
  		radix_tree_preload_end();
fa1de9008   Hugh Dickins   memcgroup: revert...
131
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
132
133
  	return error;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
134
135
136
137
138
139
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 *
 * The caller must hold the address_space's tree_lock (as
 * delete_from_swap_cache() does); this routine only undoes the radix
 * tree entry and page state — it drops neither the swap entry's
 * reference nor the page's cache reference.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	/* page_private holds the swp_entry_t value while in swap cache. */
	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}
  
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list the tail pages are added to if a huge page must be split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 if no swap slot could be allocated, a huge
 * page could not be split, or the swap cache insertion failed (the
 * swap slot is given back in the failure cases).
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	/* Huge pages cannot be swapped directly: split first, or bail. */
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}
  
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	/* Drop the swap-slot reference and the swapcache's page reference. */
	swapcache_free(entry);
	page_cache_release(page);
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
226
227
228
229
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	/* Best effort: skip silently if the page is mapped or lock is contended. */
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}
  
  /* 
   * Perform a free_page(), also freeing any swap cache associated with
b8072f099   Hugh Dickins   [PATCH] mm: updat...
244
   * this page if it is the last user of the page.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
245
246
247
248
249
250
251
252
253
254
255
256
257
   */
  void free_page_and_swap_cache(struct page *page)
  {
  	free_swap_cache(page);
  	page_cache_release(page);
  }
  
  /*
   * Passed an array of pages, drop them all from swapcache and then release
   * them.  They are removed from the LRU and freed if this is their last use.
   */
  void free_pages_and_swap_cache(struct page **pages, int nr)
  {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
258
  	struct page **pagep = pages;
aabfb5729   Michal Hocko   mm: memcontrol: d...
259
  	int i;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
260
261
  
  	lru_add_drain();
aabfb5729   Michal Hocko   mm: memcontrol: d...
262
263
264
  	for (i = 0; i < nr; i++)
  		free_swap_cache(pagep[i]);
  	release_pages(pagep, nr, false);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
265
266
267
268
269
270
271
272
273
274
275
  }
  
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		/* A hit on a readahead page feeds the swapin_nr_pages() heuristic. */
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}
  
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * On success returns the page (locked, with the read in flight) and the
 * caller holds a reference; on failure returns whatever the swap cache
 * already held for @entry (possibly NULL).
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		/* Insert failed: undo the page state and give the slot back. */
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
46017e954   Hugh Dickins   swapin_readahead:...
383

579f82901   Shaohua Li   swap: add a simpl...
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
/*
 * Decide how many pages to read around @offset on a swapin fault, based
 * on the recent readahead hit rate recorded in swapin_readahead_hits.
 * Normally returns a count between 1 and (1 << page_cluster).
 */
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;	/* last faulting offset, for adjacency check */
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;	/* window size chosen last time */

	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		/* Round the hit-based count up to the next power of two (>= 4). */
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
46017e954   Hugh Dickins   swapin_readahead:...
427
428
429
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;		/* window size - 1; window is power of two */
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;	/* window of 1: just read the target page */

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	/* Plug so the readahead requests can be merged before dispatch. */
	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		/* Tag speculative pages so hits can tune the next window. */
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}