mm/swap.c

  /*
   *  linux/mm/swap.c
   *
   *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   */
  
  /*
   * This file contains the default values for the operation of the
   * Linux VM subsystem. Fine-tuning documentation can be found in
   * Documentation/sysctl/vm.txt.
   * Started 18.12.91
   * Swap aging added 23.2.95, Stephen Tweedie.
   * Buffermem limits added 12.3.98, Rik van Riel.
   */
  
  #include <linux/mm.h>
  #include <linux/sched.h>
  #include <linux/kernel_stat.h>
  #include <linux/swap.h>
  #include <linux/mman.h>
  #include <linux/pagemap.h>
  #include <linux/pagevec.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/mm_inline.h>
  #include <linux/buffer_head.h>	/* for try_to_release_page() */
  #include <linux/percpu_counter.h>
  #include <linux/percpu.h>
  #include <linux/cpu.h>
  #include <linux/notifier.h>
  #include <linux/backing-dev.h>
  #include <linux/memcontrol.h>

  #include "internal.h"
  /* How many pages do we try to swap or page in/out together? */
  int page_cluster;
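
  /*
   * Per-CPU pagevecs: pages queued for addition to an LRU list (one
   * pagevec per LRU list) and pages queued for rotation to the tail of
   * the inactive list.  Both are flushed by drain_cpu_pagevecs().
   */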
  static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
  static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

  /*
   * This path almost never happens for VM activity - pages are normally
   * freed via pagevecs.  But it gets used by networking.
   */
  static void __page_cache_release(struct page *page)
  {
  	if (PageLRU(page)) {
  		unsigned long flags;
  		struct zone *zone = page_zone(page);
  
  		spin_lock_irqsave(&zone->lru_lock, flags);
  		VM_BUG_ON(!PageLRU(page));
  		__ClearPageLRU(page);
  		del_page_from_lru(zone, page);
  		spin_unlock_irqrestore(&zone->lru_lock, flags);
  	}
  	free_hot_page(page);
  }
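
  /*
   * A compound page stores a destructor in its head page (see
   * get_compound_page_dtor()).  Drop the reference on the head page and,
   * once it reaches zero, free the whole compound page through that
   * destructor.
   */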
  static void put_compound_page(struct page *page)
  {
  	page = compound_head(page);
  	if (put_page_testzero(page)) {
  		compound_page_dtor *dtor;

  		dtor = get_compound_page_dtor(page);
  		(*dtor)(page);
  	}
  }
  
  void put_page(struct page *page)
  {
  	if (unlikely(PageCompound(page)))
  		put_compound_page(page);
  	else if (put_page_testzero(page))
  		__page_cache_release(page);
  }
  EXPORT_SYMBOL(put_page);

  /**
   * put_pages_list() - release a list of pages
   * @pages: list of pages threaded on page->lru
   *
   * Release a list of pages which are strung together on page.lru.  Currently
   * used by read_cache_pages() and related error recovery code.
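   *
   * A caller might, for instance, gather pages on a local list with
   * list_add(&page->lru, &pages) and then free the whole batch with a
   * single put_pages_list(&pages) call.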
   */
  void put_pages_list(struct list_head *pages)
  {
  	while (!list_empty(pages)) {
  		struct page *victim;
  
  		victim = list_entry(pages->prev, struct page, lru);
  		list_del(&victim->lru);
  		page_cache_release(victim);
  	}
  }
  EXPORT_SYMBOL(put_pages_list);
  /*
   * pagevec_move_tail() must be called with IRQ disabled.
   * Otherwise this may cause nasty races.
   */
  static void pagevec_move_tail(struct pagevec *pvec)
  {
  	int i;
  	int pgmoved = 0;
  	struct zone *zone = NULL;
  
  	for (i = 0; i < pagevec_count(pvec); i++) {
  		struct page *page = pvec->pages[i];
  		struct zone *pagezone = page_zone(page);
  
  		if (pagezone != zone) {
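  			/* Crossed into a different zone: switch which lru_lock we hold. */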
  			if (zone)
  				spin_unlock(&zone->lru_lock);
  			zone = pagezone;
  			spin_lock(&zone->lru_lock);
  		}
  		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
  			int lru = page_lru_base_type(page);
  			list_move_tail(&page->lru, &zone->lru[lru].list);
  			pgmoved++;
  		}
  	}
  	if (zone)
  		spin_unlock(&zone->lru_lock);
  	__count_vm_events(PGROTATED, pgmoved);
  	release_pages(pvec->pages, pvec->nr, pvec->cold);
  	pagevec_reinit(pvec);
  }
  
  /*
   * Writeback is about to end against a page which has been marked for immediate
   * reclaim.  If it still appears to be reclaimable, move it to the tail of the
   * inactive list.
   */
  void rotate_reclaimable_page(struct page *page)
  {
  	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
  	    !PageUnevictable(page) && PageLRU(page)) {
  		struct pagevec *pvec;
  		unsigned long flags;
  
  		page_cache_get(page);
  		local_irq_save(flags);
  		pvec = &__get_cpu_var(lru_rotate_pvecs);
  		if (!pagevec_add(pvec, page))
  			pagevec_move_tail(pvec);
  		local_irq_restore(flags);
  	}
  }
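
  /*
   * Bump the zone's recent_scanned/recent_rotated counters (and the memory
   * cgroup's, when the page is charged to one).  Reclaim uses these ratios
   * to balance scanning of the file and anonymous LRU lists.
   */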
  static void update_page_reclaim_stat(struct zone *zone, struct page *page,
  				     int file, int rotated)
  {
  	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
  	struct zone_reclaim_stat *memcg_reclaim_stat;
  
  	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
  
  	reclaim_stat->recent_scanned[file]++;
  	if (rotated)
  		reclaim_stat->recent_rotated[file]++;
  
  	if (!memcg_reclaim_stat)
  		return;
  
  	memcg_reclaim_stat->recent_scanned[file]++;
  	if (rotated)
  		memcg_reclaim_stat->recent_rotated[file]++;
  }
  /*
   * FIXME: speed this up?
   */
  void activate_page(struct page *page)
  {
  	struct zone *zone = page_zone(page);
  
  	spin_lock_irq(&zone->lru_lock);
  	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
  		int file = page_is_file_cache(page);
  		int lru = page_lru_base_type(page);
  		del_page_from_lru_list(zone, page, lru);
  		SetPageActive(page);
  		lru += LRU_ACTIVE;
  		add_page_to_lru_list(zone, page, lru);
  		__count_vm_event(PGACTIVATE);

  		update_page_reclaim_stat(zone, page, file, 1);
  	}
  	spin_unlock_irq(&zone->lru_lock);
  }
  
  /*
   * Mark a page as having seen activity.
   *
   * inactive,unreferenced	->	inactive,referenced
   * inactive,referenced		->	active,unreferenced
   * active,unreferenced		->	active,referenced
   */
  void mark_page_accessed(struct page *page)
  {
  	if (!PageActive(page) && !PageUnevictable(page) &&
  			PageReferenced(page) && PageLRU(page)) {
  		activate_page(page);
  		ClearPageReferenced(page);
  	} else if (!PageReferenced(page)) {
  		SetPageReferenced(page);
  	}
  }
  
  EXPORT_SYMBOL(mark_page_accessed);
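
  /*
   * Queue the page on this CPU's lru_add_pvecs pagevec for @lru; once the
   * pagevec fills up, ____pagevec_lru_add() moves the whole batch onto the
   * zone's LRU list in one go, amortising zone->lru_lock acquisitions.
   */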
  void __lru_cache_add(struct page *page, enum lru_list lru)
  {
  	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
  
  	page_cache_get(page);
  	if (!pagevec_add(pvec, page))
  		____pagevec_lru_add(pvec, lru);
  	put_cpu_var(lru_add_pvecs);
  }
  /**
   * lru_cache_add_lru - add a page to a page list
   * @page: the page to be added to the LRU.
   * @lru: the LRU list to which the page is added.
   */
  void lru_cache_add_lru(struct page *page, enum lru_list lru)
  {
  	if (PageActive(page)) {
  		VM_BUG_ON(PageUnevictable(page));
  		ClearPageActive(page);
  	} else if (PageUnevictable(page)) {
  		VM_BUG_ON(PageActive(page));
  		ClearPageUnevictable(page);
  	}

  	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
  	__lru_cache_add(page, lru);
  }
  /**
   * add_page_to_unevictable_list - add a page to the unevictable list
   * @page:  the page to be added to the unevictable list
   *
   * Add page directly to its zone's unevictable list.  To avoid races with
   * tasks that might be making the page evictable, through eg. munlock,
   * munmap or exit, while it's not on the lru, we want to add the page
   * while it's locked or otherwise "invisible" to other tasks.  This is
   * difficult to do when using the pagevec cache, so bypass that.
   */
  void add_page_to_unevictable_list(struct page *page)
  {
  	struct zone *zone = page_zone(page);
  
  	spin_lock_irq(&zone->lru_lock);
  	SetPageUnevictable(page);
  	SetPageLRU(page);
  	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
  	spin_unlock_irq(&zone->lru_lock);
  }
  /*
   * Drain pages out of the cpu's pagevecs.
   * Either "cpu" is the current CPU, and preemption has already been
   * disabled; or "cpu" is being hot-unplugged, and is already dead.
   */
  static void drain_cpu_pagevecs(int cpu)
  {
  	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
  	struct pagevec *pvec;
  	int lru;

  	for_each_lru(lru) {
  		pvec = &pvecs[lru - LRU_BASE];
  		if (pagevec_count(pvec))
  			____pagevec_lru_add(pvec, lru);
  	}
  
  	pvec = &per_cpu(lru_rotate_pvecs, cpu);
  	if (pagevec_count(pvec)) {
  		unsigned long flags;
  
  		/* No harm done if a racing interrupt already did this */
  		local_irq_save(flags);
  		pagevec_move_tail(pvec);
  		local_irq_restore(flags);
  	}
  }
  
  void lru_add_drain(void)
  {
  	drain_cpu_pagevecs(get_cpu());
  	put_cpu();
  }
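
  /*
   * Work callback for lru_add_drain_all(): drain the pagevecs of whichever
   * CPU this work item runs on.
   */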
  static void lru_add_drain_per_cpu(struct work_struct *dummy)
  {
  	lru_add_drain();
  }
  
  /*
   * Returns 0 for success
   */
  int lru_add_drain_all(void)
  {
  	return schedule_on_each_cpu(lru_add_drain_per_cpu);
  }
  /*
   * Batched page_cache_release().  Decrement the reference count on all the
   * passed pages.  If it fell to zero then remove the page from the LRU and
   * free it.
   *
   * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
   * for the remainder of the operation.
   *
   * The locking in this function is against shrink_inactive_list(): we recheck
   * the page count inside the lock to see whether shrink_inactive_list()
   * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
   * will free it.
   */
  void release_pages(struct page **pages, int nr, int cold)
  {
  	int i;
  	struct pagevec pages_to_free;
  	struct zone *zone = NULL;
  	unsigned long uninitialized_var(flags);
  
  	pagevec_init(&pages_to_free, cold);
  	for (i = 0; i < nr; i++) {
  		struct page *page = pages[i];

  		if (unlikely(PageCompound(page))) {
  			if (zone) {
  				spin_unlock_irqrestore(&zone->lru_lock, flags);
  				zone = NULL;
  			}
  			put_compound_page(page);
  			continue;
  		}
  		if (!put_page_testzero(page))
  			continue;
  		if (PageLRU(page)) {
  			struct zone *pagezone = page_zone(page);

  			if (pagezone != zone) {
  				if (zone)
  					spin_unlock_irqrestore(&zone->lru_lock,
  									flags);
  				zone = pagezone;
  				spin_lock_irqsave(&zone->lru_lock, flags);
  			}
  			VM_BUG_ON(!PageLRU(page));
  			__ClearPageLRU(page);
  			del_page_from_lru(zone, page);
  		}
  
  		if (!pagevec_add(&pages_to_free, page)) {
  			if (zone) {
  				spin_unlock_irqrestore(&zone->lru_lock, flags);
  				zone = NULL;
  			}
  			__pagevec_free(&pages_to_free);
  			pagevec_reinit(&pages_to_free);
    		}
  	}
  	if (zone)
  		spin_unlock_irqrestore(&zone->lru_lock, flags);
  
  	pagevec_free(&pages_to_free);
  }
  
  /*
   * The pages which we're about to release may be in the deferred lru-addition
   * queues.  That would prevent them from really being freed right now.  That's
   * OK from a correctness point of view but is inefficient - those pages may be
   * cache-warm and we want to give them back to the page allocator ASAP.
   *
   * So __pagevec_release() will drain those queues here.  ____pagevec_lru_add()
   * calls release_pages() directly to avoid mutual recursion.
   */
  void __pagevec_release(struct pagevec *pvec)
  {
  	lru_add_drain();
  	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
  	pagevec_reinit(pvec);
  }
  EXPORT_SYMBOL(__pagevec_release);
  /*
   * Add the passed pages to the LRU, then drop the caller's refcount
   * on them.  Reinitialises the caller's pagevec.
   */
  void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
  {
  	int i;
  	struct zone *zone = NULL;

  	VM_BUG_ON(is_unevictable_lru(lru));
  
  	for (i = 0; i < pagevec_count(pvec); i++) {
  		struct page *page = pvec->pages[i];
  		struct zone *pagezone = page_zone(page);
  		int file;
  		int active;
  
  		if (pagezone != zone) {
  			if (zone)
  				spin_unlock_irq(&zone->lru_lock);
  			zone = pagezone;
  			spin_lock_irq(&zone->lru_lock);
  		}
  		VM_BUG_ON(PageActive(page));
  		VM_BUG_ON(PageUnevictable(page));
  		VM_BUG_ON(PageLRU(page));
  		SetPageLRU(page);
  		active = is_active_lru(lru);
  		file = is_file_lru(lru);
  		if (active)
  			SetPageActive(page);
  		update_page_reclaim_stat(zone, page, file, active);
  		add_page_to_lru_list(zone, page, lru);
  	}
  	if (zone)
  		spin_unlock_irq(&zone->lru_lock);
  	release_pages(pvec->pages, pvec->nr, pvec->cold);
  	pagevec_reinit(pvec);
  }
  EXPORT_SYMBOL(____pagevec_lru_add);
  
  /*
   * Try to drop buffers from the pages in a pagevec
   */
  void pagevec_strip(struct pagevec *pvec)
  {
  	int i;
  
  	for (i = 0; i < pagevec_count(pvec); i++) {
  		struct page *page = pvec->pages[i];
  		if (page_has_private(page) && trylock_page(page)) {
  			if (page_has_private(page))
  				try_to_release_page(page, 0);
  			unlock_page(page);
  		}
  	}
  }
  
  /**
   * pagevec_lookup - gang pagecache lookup
   * @pvec:	Where the resulting pages are placed
   * @mapping:	The address_space to search
   * @start:	The starting page index
   * @nr_pages:	The maximum number of pages
   *
   * pagevec_lookup() will search for and return a group of up to @nr_pages pages
   * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
   * reference against the pages in @pvec.
   *
   * The search returns a group of mapping-contiguous pages with ascending
   * indexes.  There may be holes in the indices due to not-present pages.
   *
   * pagevec_lookup() returns the number of pages which were found.
   */
  unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
  		pgoff_t start, unsigned nr_pages)
  {
  	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
  	return pagevec_count(pvec);
  }
  EXPORT_SYMBOL(pagevec_lookup);
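
  /*
   * Like pagevec_lookup(), but only pages with the given radix-tree @tag
   * (e.g. PAGECACHE_TAG_DIRTY) are returned; @index is advanced past the
   * last page found, ready for the next call.
   */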
  unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
  		pgoff_t *index, int tag, unsigned nr_pages)
  {
  	pvec->nr = find_get_pages_tag(mapping, index, tag,
  					nr_pages, pvec->pages);
  	return pagevec_count(pvec);
  }
  EXPORT_SYMBOL(pagevec_lookup_tag);

  /*
   * Perform any setup for the swap system
   */
  void __init swap_setup(void)
  {
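  	/*
  	 * totalram_pages >> (20 - PAGE_SHIFT) converts the page count to
  	 * megabytes: with 4 KiB pages (PAGE_SHIFT == 12) this is a shift by
  	 * 8, i.e. pages / 256.
  	 */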
  	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

  #ifdef CONFIG_SWAP
  	bdi_init(swapper_space.backing_dev_info);
  #endif
  	/* Use a smaller cluster for small-memory machines */
  	if (megs < 16)
  		page_cluster = 2;
  	else
  		page_cluster = 3;
  	/*
  	 * Right now other parts of the system mean that we
  	 * _really_ don't want to cluster much more
  	 */
  }