Blame view

mm/swap_state.c 12.9 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
5
6
7
8
  /*
   *  linux/mm/swap_state.c
   *
   *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   *  Swap reorganised 29.12.95, Stephen Tweedie
   *
   *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
9
  #include <linux/mm.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
10
  #include <linux/gfp.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
11
12
  #include <linux/kernel_stat.h>
  #include <linux/swap.h>
46017e954   Hugh Dickins   swapin_readahead:...
13
  #include <linux/swapops.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
14
15
  #include <linux/init.h>
  #include <linux/pagemap.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
16
  #include <linux/backing-dev.h>
3fb5c298b   Christian Ehrhardt   swap: allow swap ...
17
  #include <linux/blkdev.h>
c484d4104   Hugh Dickins   [PATCH] mm: free_...
18
  #include <linux/pagevec.h>
b20a35035   Christoph Lameter   [PATCH] page migr...
19
  #include <linux/migrate.h>
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
20
  #include <linux/page_cgroup.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
21
22
23
24
25
  
  #include <asm/pgtable.h>
  
  /*
   * swapper_space is a fiction, retained to simplify the path through
7eaceacca   Jens Axboe   block: remove per...
26
   * vmscan's shrink_page_list.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
27
   */
f5e54d6e5   Christoph Hellwig   [PATCH] mark addr...
28
  static const struct address_space_operations swap_aops = {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
29
  	.writepage	= swap_writepage,
62c230bc1   Mel Gorman   mm: add support f...
30
  	.set_page_dirty	= swap_set_page_dirty,
e965f9630   Christoph Lameter   [PATCH] Direct Mi...
31
  	.migratepage	= migrate_page,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
32
33
34
  };
  
  static struct backing_dev_info swap_backing_dev_info = {
d993831fa   Jens Axboe   writeback: add na...
35
  	.name		= "swap",
4f98a2fee   Rik van Riel   vmscan: split LRU...
36
  	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
37
  };
33806f06d   Shaohua Li   swap: make each s...
38
39
40
41
42
43
  struct address_space swapper_spaces[MAX_SWAPFILES] = {
  	[0 ... MAX_SWAPFILES - 1] = {
  		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
  		.a_ops		= &swap_aops,
  		.backing_dev_info = &swap_backing_dev_info,
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
44
  };
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
45
46
47
48
49
50
51
52
  
  #define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
  
  static struct {
  	unsigned long add_total;
  	unsigned long del_total;
  	unsigned long find_success;
  	unsigned long find_total;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
53
  } swap_cache_info;
33806f06d   Shaohua Li   swap: make each s...
54
55
56
57
58
59
60
61
62
  unsigned long total_swapcache_pages(void)
  {
  	int i;
  	unsigned long ret = 0;
  
  	for (i = 0; i < MAX_SWAPFILES; i++)
  		ret += swapper_spaces[i].nrpages;
  	return ret;
  }
579f82901   Shaohua Li   swap: add a simpl...
63
  static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
64
65
  void show_swap_cache_info(void)
  {
33806f06d   Shaohua Li   swap: make each s...
66
67
  	printk("%lu pages in swap cache
  ", total_swapcache_pages());
2c97b7fc0   Johannes Weiner   mm: print swapcac...
68
69
  	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu
  ",
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
70
  		swap_cache_info.add_total, swap_cache_info.del_total,
bb63be0a0   Hugh Dickins   tmpfs: move swap_...
71
  		swap_cache_info.find_success, swap_cache_info.find_total);
ec8acf20a   Shaohua Li   swap: add per-par...
72
73
74
  	printk("Free swap  = %ldkB
  ",
  		get_nr_swap_pages() << (PAGE_SHIFT - 10));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
75
76
77
78
79
  	printk("Total swap = %lukB
  ", total_swap_pages << (PAGE_SHIFT - 10));
  }
  
  /*
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
80
   * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
81
82
   * but sets SwapCache flag and private instead of mapping and index.
   */
2f772e6ca   Seth Jennings   mm: break up swap...
83
  int __add_to_swap_cache(struct page *page, swp_entry_t entry)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
84
85
  {
  	int error;
33806f06d   Shaohua Li   swap: make each s...
86
  	struct address_space *address_space;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
87

309381fea   Sasha Levin   mm: dump page whe...
88
89
90
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	VM_BUG_ON_PAGE(PageSwapCache(page), page);
  	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
51726b122   Hugh Dickins   mm: replace some ...
91

31a563962   Daisuke Nishimura   mm: add_to_swap_c...
92
93
94
  	page_cache_get(page);
  	SetPageSwapCache(page);
  	set_page_private(page, entry.val);
33806f06d   Shaohua Li   swap: make each s...
95
96
97
98
  	address_space = swap_address_space(entry);
  	spin_lock_irq(&address_space->tree_lock);
  	error = radix_tree_insert(&address_space->page_tree,
  					entry.val, page);
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
99
  	if (likely(!error)) {
33806f06d   Shaohua Li   swap: make each s...
100
  		address_space->nrpages++;
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
101
102
103
  		__inc_zone_page_state(page, NR_FILE_PAGES);
  		INC_CACHE_INFO(add_total);
  	}
33806f06d   Shaohua Li   swap: make each s...
104
  	spin_unlock_irq(&address_space->tree_lock);
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
105
106
  
  	if (unlikely(error)) {
2ca4532a4   Daisuke Nishimura   mm: add_to_swap_c...
107
108
109
110
111
112
  		/*
  		 * Only the context which have set SWAP_HAS_CACHE flag
  		 * would call add_to_swap_cache().
  		 * So add_to_swap_cache() doesn't returns -EEXIST.
  		 */
  		VM_BUG_ON(error == -EEXIST);
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
113
114
115
116
117
118
119
120
121
122
123
124
  		set_page_private(page, 0UL);
  		ClearPageSwapCache(page);
  		page_cache_release(page);
  	}
  
  	return error;
  }
  
  
  int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
  {
  	int error;
5e4c0d974   Jan Kara   lib/radix-tree.c:...
125
  	error = radix_tree_maybe_preload(gfp_mask);
35c754d79   Balbir Singh   memory controller...
126
  	if (!error) {
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
127
  		error = __add_to_swap_cache(page, entry);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
128
  		radix_tree_preload_end();
fa1de9008   Hugh Dickins   memcgroup: revert...
129
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
130
131
  	return error;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
132
133
134
135
136
137
  /*
   * This must be called only on pages that have
   * been verified to be in the swap cache.
   */
  void __delete_from_swap_cache(struct page *page)
  {
33806f06d   Shaohua Li   swap: make each s...
138
139
  	swp_entry_t entry;
  	struct address_space *address_space;
309381fea   Sasha Levin   mm: dump page whe...
140
141
142
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
  	VM_BUG_ON_PAGE(PageWriteback(page), page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
143

33806f06d   Shaohua Li   swap: make each s...
144
145
146
  	entry.val = page_private(page);
  	address_space = swap_address_space(entry);
  	radix_tree_delete(&address_space->page_tree, page_private(page));
4c21e2f24   Hugh Dickins   [PATCH] mm: split...
147
  	set_page_private(page, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
148
  	ClearPageSwapCache(page);
33806f06d   Shaohua Li   swap: make each s...
149
  	address_space->nrpages--;
347ce434d   Christoph Lameter   [PATCH] zoned vm ...
150
  	__dec_zone_page_state(page, NR_FILE_PAGES);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
151
152
153
154
155
156
157
158
159
160
  	INC_CACHE_INFO(del_total);
  }
  
  /**
   * add_to_swap - allocate swap space for a page
   * @page: page we want to move to swap
   *
   * Allocate swap space for the page and add the page to the
   * swap cache.  Caller needs to hold the page lock. 
   */
5bc7b8aca   Shaohua Li   mm: thp: add spli...
161
  int add_to_swap(struct page *page, struct list_head *list)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
162
163
  {
  	swp_entry_t entry;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
164
  	int err;
309381fea   Sasha Levin   mm: dump page whe...
165
166
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	VM_BUG_ON_PAGE(!PageUptodate(page), page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
167

2ca4532a4   Daisuke Nishimura   mm: add_to_swap_c...
168
169
170
  	entry = get_swap_page();
  	if (!entry.val)
  		return 0;
3f04f62f9   Andrea Arcangeli   thp: split_huge_p...
171
  	if (unlikely(PageTransHuge(page)))
5bc7b8aca   Shaohua Li   mm: thp: add spli...
172
  		if (unlikely(split_huge_page_to_list(page, list))) {
3f04f62f9   Andrea Arcangeli   thp: split_huge_p...
173
174
175
  			swapcache_free(entry, NULL);
  			return 0;
  		}
2ca4532a4   Daisuke Nishimura   mm: add_to_swap_c...
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
  	/*
  	 * Radix-tree node allocations from PF_MEMALLOC contexts could
  	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
  	 * stops emergency reserves from being allocated.
  	 *
  	 * TODO: this could cause a theoretical memory reclaim
  	 * deadlock in the swap out path.
  	 */
  	/*
  	 * Add it to the swap cache and mark it dirty
  	 */
  	err = add_to_swap_cache(page, entry,
  			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
  
  	if (!err) {	/* Success */
  		SetPageDirty(page);
  		return 1;
  	} else {	/* -ENOMEM radix-tree allocation failure */
bd53b714d   Nick Piggin   [PATCH] mm: use _...
194
  		/*
2ca4532a4   Daisuke Nishimura   mm: add_to_swap_c...
195
196
  		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
  		 * clear SWAP_HAS_CACHE flag.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
197
  		 */
2ca4532a4   Daisuke Nishimura   mm: add_to_swap_c...
198
199
  		swapcache_free(entry, NULL);
  		return 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
200
201
202
203
204
205
206
207
208
209
210
211
  	}
  }
  
  /*
   * This must be called only on pages that have
   * been verified to be in the swap cache and locked.
   * It will never put the page into the free list,
   * the caller has a reference on the page.
   */
  void delete_from_swap_cache(struct page *page)
  {
  	swp_entry_t entry;
33806f06d   Shaohua Li   swap: make each s...
212
  	struct address_space *address_space;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
213

4c21e2f24   Hugh Dickins   [PATCH] mm: split...
214
  	entry.val = page_private(page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
215

33806f06d   Shaohua Li   swap: make each s...
216
217
  	address_space = swap_address_space(entry);
  	spin_lock_irq(&address_space->tree_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
218
  	__delete_from_swap_cache(page);
33806f06d   Shaohua Li   swap: make each s...
219
  	spin_unlock_irq(&address_space->tree_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
220

cb4b86ba4   KAMEZAWA Hiroyuki   mm: add swap cach...
221
  	swapcache_free(entry, page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
222
223
  	page_cache_release(page);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
224
225
226
227
  /* 
   * If we are the only user, then try to free up the swap cache. 
   * 
   * Its ok to check for PageSwapCache without the page lock
a2c43eed8   Hugh Dickins   mm: try_to_free_s...
228
229
   * here because we are going to recheck again inside
   * try_to_free_swap() _with_ the lock.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
230
231
232
233
   * 					- Marcelo
   */
  static inline void free_swap_cache(struct page *page)
  {
a2c43eed8   Hugh Dickins   mm: try_to_free_s...
234
235
  	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
  		try_to_free_swap(page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
236
237
238
239
240
241
  		unlock_page(page);
  	}
  }
  
  /* 
   * Perform a free_page(), also freeing any swap cache associated with
b8072f099   Hugh Dickins   [PATCH] mm: updat...
242
   * this page if it is the last user of the page.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
243
244
245
246
247
248
249
250
251
252
253
254
255
   */
  void free_page_and_swap_cache(struct page *page)
  {
  	free_swap_cache(page);
  	page_cache_release(page);
  }
  
  /*
   * Passed an array of pages, drop them all from swapcache and then release
   * them.  They are removed from the LRU and freed if this is their last use.
   */
  void free_pages_and_swap_cache(struct page **pages, int nr)
  {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
256
257
258
259
  	struct page **pagep = pages;
  
  	lru_add_drain();
  	while (nr) {
c484d4104   Hugh Dickins   [PATCH] mm: free_...
260
  		int todo = min(nr, PAGEVEC_SIZE);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
  		int i;
  
  		for (i = 0; i < todo; i++)
  			free_swap_cache(pagep[i]);
  		release_pages(pagep, todo, 0);
  		pagep += todo;
  		nr -= todo;
  	}
  }
  
  /*
   * Lookup a swap entry in the swap cache. A found page will be returned
   * unlocked and with its refcount incremented - we rely on the kernel
   * lock getting page table operations atomic even if we drop the page
   * lock before returning.
   */
  struct page * lookup_swap_cache(swp_entry_t entry)
  {
  	struct page *page;
33806f06d   Shaohua Li   swap: make each s...
280
  	page = find_get_page(swap_address_space(entry), entry.val);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
281

579f82901   Shaohua Li   swap: add a simpl...
282
  	if (page) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
283
  		INC_CACHE_INFO(find_success);
579f82901   Shaohua Li   swap: add a simpl...
284
285
286
  		if (TestClearPageReadahead(page))
  			atomic_inc(&swapin_readahead_hits);
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
287
288
289
290
291
292
293
294
295
296
297
  
  	INC_CACHE_INFO(find_total);
  	return page;
  }
  
  /* 
   * Locate a page of swap in physical memory, reserving swap cache space
   * and reading the disk if it is not already cached.
   * A failure return means that either the page allocation failed or that
   * the swap entry is no longer in use.
   */
02098feaa   Hugh Dickins   swapin needs gfp_...
298
  struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
299
300
301
302
303
304
305
306
307
308
309
  			struct vm_area_struct *vma, unsigned long addr)
  {
  	struct page *found_page, *new_page = NULL;
  	int err;
  
  	do {
  		/*
  		 * First check the swap cache.  Since this is normally
  		 * called after lookup_swap_cache() failed, re-calling
  		 * that would confuse statistics.
  		 */
33806f06d   Shaohua Li   swap: make each s...
310
311
  		found_page = find_get_page(swap_address_space(entry),
  					entry.val);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
312
313
314
315
316
317
318
  		if (found_page)
  			break;
  
  		/*
  		 * Get a new page to read into from swap.
  		 */
  		if (!new_page) {
02098feaa   Hugh Dickins   swapin needs gfp_...
319
  			new_page = alloc_page_vma(gfp_mask, vma, addr);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
320
321
322
323
324
  			if (!new_page)
  				break;		/* Out of memory */
  		}
  
  		/*
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
325
326
  		 * call radix_tree_preload() while we can wait.
  		 */
5e4c0d974   Jan Kara   lib/radix-tree.c:...
327
  		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
328
329
330
331
  		if (err)
  			break;
  
  		/*
f000944d0   Hugh Dickins   tmpfs: shuffle ad...
332
333
  		 * Swap entry may have been freed since our caller observed it.
  		 */
355cfa73d   KAMEZAWA Hiroyuki   mm: modify swap_m...
334
  		err = swapcache_prepare(entry);
cbab0e4ee   Rafael Aquini   swap: avoid read_...
335
  		if (err == -EEXIST) {
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
336
  			radix_tree_preload_end();
cbab0e4ee   Rafael Aquini   swap: avoid read_...
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
  			/*
  			 * We might race against get_swap_page() and stumble
  			 * across a SWAP_HAS_CACHE swap_map entry whose page
  			 * has not been brought into the swapcache yet, while
  			 * the other end is scheduled away waiting on discard
  			 * I/O completion at scan_swap_map().
  			 *
  			 * In order to avoid turning this transitory state
  			 * into a permanent loop around this -EEXIST case
  			 * if !CONFIG_PREEMPT and the I/O completion happens
  			 * to be waiting on the CPU waitqueue where we are now
  			 * busy looping, we just conditionally invoke the
  			 * scheduler here, if there are some more important
  			 * tasks to run.
  			 */
  			cond_resched();
355cfa73d   KAMEZAWA Hiroyuki   mm: modify swap_m...
353
  			continue;
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
354
355
356
  		}
  		if (err) {		/* swp entry is obsolete ? */
  			radix_tree_preload_end();
f000944d0   Hugh Dickins   tmpfs: shuffle ad...
357
  			break;
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
358
  		}
f000944d0   Hugh Dickins   tmpfs: shuffle ad...
359

2ca4532a4   Daisuke Nishimura   mm: add_to_swap_c...
360
  		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
f45840b5c   Nick Piggin   mm: pagecache ins...
361
  		__set_page_locked(new_page);
b2e185384   Rik van Riel   define page_file_...
362
  		SetPageSwapBacked(new_page);
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
363
  		err = __add_to_swap_cache(new_page, entry);
529ae9aaa   Nick Piggin   mm: rename page t...
364
  		if (likely(!err)) {
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
365
  			radix_tree_preload_end();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
366
367
368
  			/*
  			 * Initiate read into locked page and return.
  			 */
c5fdae469   Rik van Riel   vmscan: add newly...
369
  			lru_cache_add_anon(new_page);
aca8bf323   Minchan Kim   mm: remove file a...
370
  			swap_readpage(new_page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
371
372
  			return new_page;
  		}
31a563962   Daisuke Nishimura   mm: add_to_swap_c...
373
  		radix_tree_preload_end();
b2e185384   Rik van Riel   define page_file_...
374
  		ClearPageSwapBacked(new_page);
f45840b5c   Nick Piggin   mm: pagecache ins...
375
  		__clear_page_locked(new_page);
2ca4532a4   Daisuke Nishimura   mm: add_to_swap_c...
376
377
378
379
  		/*
  		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
  		 * clear SWAP_HAS_CACHE flag.
  		 */
cb4b86ba4   KAMEZAWA Hiroyuki   mm: add swap cach...
380
  		swapcache_free(entry, NULL);
f000944d0   Hugh Dickins   tmpfs: shuffle ad...
381
  	} while (err != -ENOMEM);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
382
383
384
385
386
  
  	if (new_page)
  		page_cache_release(new_page);
  	return found_page;
  }
46017e954   Hugh Dickins   swapin_readahead:...
387

579f82901   Shaohua Li   swap: add a simpl...
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
/*
 * swapin_nr_pages - decide how many pages to read around a faulting offset.
 *
 * Grows the window when recent readahead pages were actually hit
 * (swapin_readahead_hits) or when faults look sequential, shrinks it by
 * at most half per call otherwise.  Result is clamped to
 * 1 << page_cluster.  The static locals carry state across calls and are
 * not fully race-free; this is a heuristic, approximate values are fine.
 */
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	/* page_cluster is tunable at runtime; snapshot it once. */
	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		/* Round the hit count up to the next power of two (min 4). */
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
46017e954   Hugh Dickins   swapin_readahead:...
431
432
433
  /**
   * swapin_readahead - swap in pages in hope we need them soon
   * @entry: swap entry of this memory
7682486b3   Randy Dunlap   mm: fix various k...
434
   * @gfp_mask: memory allocation flags
46017e954   Hugh Dickins   swapin_readahead:...
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
   * @vma: user vma this address belongs to
   * @addr: target address for mempolicy
   *
   * Returns the struct page for entry and addr, after queueing swapin.
   *
   * Primitive swap readahead code. We simply read an aligned block of
   * (1 << page_cluster) entries in the swap area. This method is chosen
   * because it doesn't cost us any seek time.  We also make sure to queue
   * the 'original' request together with the readahead ones...
   *
   * This has been extended to use the NUMA policies from the mm triggering
   * the readahead.
   *
   * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
   */
02098feaa   Hugh Dickins   swapin needs gfp_...
450
  struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
46017e954   Hugh Dickins   swapin_readahead:...
451
452
  			struct vm_area_struct *vma, unsigned long addr)
  {
46017e954   Hugh Dickins   swapin_readahead:...
453
  	struct page *page;
579f82901   Shaohua Li   swap: add a simpl...
454
455
  	unsigned long entry_offset = swp_offset(entry);
  	unsigned long offset = entry_offset;
67f96aa25   Rik van Riel   mm: make swapin r...
456
  	unsigned long start_offset, end_offset;
579f82901   Shaohua Li   swap: add a simpl...
457
  	unsigned long mask;
3fb5c298b   Christian Ehrhardt   swap: allow swap ...
458
  	struct blk_plug plug;
46017e954   Hugh Dickins   swapin_readahead:...
459

579f82901   Shaohua Li   swap: add a simpl...
460
461
462
  	mask = swapin_nr_pages(offset) - 1;
  	if (!mask)
  		goto skip;
67f96aa25   Rik van Riel   mm: make swapin r...
463
464
465
466
467
  	/* Read a page_cluster sized and aligned cluster around offset. */
  	start_offset = offset & ~mask;
  	end_offset = offset | mask;
  	if (!start_offset)	/* First page is swap header. */
  		start_offset++;
3fb5c298b   Christian Ehrhardt   swap: allow swap ...
468
  	blk_start_plug(&plug);
67f96aa25   Rik van Riel   mm: make swapin r...
469
  	for (offset = start_offset; offset <= end_offset ; offset++) {
46017e954   Hugh Dickins   swapin_readahead:...
470
471
  		/* Ok, do the async read-ahead now */
  		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
02098feaa   Hugh Dickins   swapin needs gfp_...
472
  						gfp_mask, vma, addr);
46017e954   Hugh Dickins   swapin_readahead:...
473
  		if (!page)
67f96aa25   Rik van Riel   mm: make swapin r...
474
  			continue;
579f82901   Shaohua Li   swap: add a simpl...
475
476
  		if (offset != entry_offset)
  			SetPageReadahead(page);
46017e954   Hugh Dickins   swapin_readahead:...
477
478
  		page_cache_release(page);
  	}
3fb5c298b   Christian Ehrhardt   swap: allow swap ...
479
  	blk_finish_plug(&plug);
46017e954   Hugh Dickins   swapin_readahead:...
480
  	lru_add_drain();	/* Push any new pages onto the LRU now */
579f82901   Shaohua Li   swap: add a simpl...
481
  skip:
02098feaa   Hugh Dickins   swapin needs gfp_...
482
  	return read_swap_cache_async(entry, gfp_mask, vma, addr);
46017e954   Hugh Dickins   swapin_readahead:...
483
  }