mm/page_isolation.c 3.7 KB
  /*
   * linux/mm/page_isolation.c
   */
  #include <linux/mm.h>
  #include <linux/page-isolation.h>
  #include <linux/pageblock-flags.h>
  #include "internal.h"
  
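  /*
   * Return the first page in [pfn, pfn + nr_pages) whose pfn is valid, or
   * NULL if no pfn in the range is valid.  pfn_valid_within() can only be
   * false when CONFIG_HOLES_IN_ZONE is set, i.e. when individual pfns
   * inside a pageblock may be invalid.
   */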
  static inline struct page *
  __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  {
  	int i;
  	for (i = 0; i < nr_pages; i++)
  		if (pfn_valid_within(pfn + i))
  			break;
  	if (unlikely(i == nr_pages))
  		return NULL;
  	return pfn_to_page(pfn + i);
  }
  
  /*
   * start_isolate_page_range() -- set the page-allocation type of a range of
   * pages to MIGRATE_ISOLATE.
   * @start_pfn: The lower PFN of the range to be isolated.
   * @end_pfn: The upper PFN of the range to be isolated.
   * @migratetype: migrate type to restore during error recovery.
   *
   * Setting the page-allocation type to MIGRATE_ISOLATE means that free pages
   * in the range will never be allocated: pages that are free now, and pages
   * freed in the future, will not be handed out again.
   *
   * start_pfn/end_pfn must be aligned to pageblock_order.
   * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
   */
  int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  			     unsigned migratetype)
  {
  	unsigned long pfn;
  	unsigned long undo_pfn;
  	struct page *page;
  
  	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
  	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
  
  	for (pfn = start_pfn;
  	     pfn < end_pfn;
  	     pfn += pageblock_nr_pages) {
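  		/*
  		 * Switch each pageblock in the range to MIGRATE_ISOLATE; if
  		 * any pageblock cannot be isolated, roll back the ones that
  		 * have already been switched.
  		 */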
  		page = __first_valid_page(pfn, pageblock_nr_pages);
  		if (page && set_migratetype_isolate(page)) {
  			undo_pfn = pfn;
  			goto undo;
  		}
  	}
  	return 0;
  undo:
  	for (pfn = start_pfn;
  	     pfn < undo_pfn;
  	     pfn += pageblock_nr_pages)
  		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
  
  	return -EBUSY;
  }
  
  /*
   * Make isolated pages available again.
   */
  int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  			    unsigned migratetype)
  {
  	unsigned long pfn;
  	struct page *page;
  	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
  	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
  	for (pfn = start_pfn;
  	     pfn < end_pfn;
  	     pfn += pageblock_nr_pages) {
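  		/*
  		 * Only pageblocks that are still MIGRATE_ISOLATE are put
  		 * back to the requested migrate type.
  		 */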
  		page = __first_valid_page(pfn, pageblock_nr_pages);
  		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
  			continue;
  		unset_migratetype_isolate(page, migratetype);
  	}
  	return 0;
  }
  /*
   * Test whether all pages in the range are free (i.e. isolated).
   * All pages in [start_pfn...end_pfn) must be in the same zone.
   * zone->lock must be held before calling this.
   *
   * Returns 1 if all pages in the range are isolated.
   */
  static int
  __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
  {
  	struct page *page;
  
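  	/*
  	 * A free page in the buddy allocator lets the scan skip a whole
  	 * (1 << order) chunk at once; a page with zero refcount whose
  	 * page_private records MIGRATE_ISOLATE also counts as isolated.
  	 * Anything else stops the scan and the range is reported busy.
  	 */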
  	while (pfn < end_pfn) {
  		if (!pfn_valid_within(pfn)) {
  			pfn++;
  			continue;
  		}
  		page = pfn_to_page(pfn);
  		if (PageBuddy(page))
  			pfn += 1 << page_order(page);
  		else if (page_count(page) == 0 &&
  				page_private(page) == MIGRATE_ISOLATE)
  			pfn += 1;
  		else
  			break;
  	}
  	if (pfn < end_pfn)
  		return 0;
  	return 1;
  }
  
  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
  {
  	unsigned long pfn, flags;
  	struct page *page;
  	struct zone *zone;
  	int ret;

  	/*
  	 * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES, so chunks of free
  	 * pages are not necessarily aligned to pageblock_nr_pages.
  	 * Just check the pageblock migrate type first.
  	 */
  	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  		page = __first_valid_page(pfn, pageblock_nr_pages);
  		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
  			break;
  	}
  	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
  	if ((pfn < end_pfn) || !page)
  		return -EBUSY;
  	/* Check that all pages are free or marked as MIGRATE_ISOLATE */
  	zone = page_zone(page);
  	spin_lock_irqsave(&zone->lock, flags);
  	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
  	spin_unlock_irqrestore(&zone->lock, flags);
  	return ret ? 0 : -EBUSY;
  }
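
The three exported entry points above are normally used together by callers such as memory hot-remove. The sketch below is illustrative only: the wrapper name example_isolate_range and the choice of MIGRATE_MOVABLE as the recovery migrate type are assumptions, not part of this file, and a real caller would also migrate or reclaim in-use pages between isolating and testing the range. Both pfns are assumed to be pageblock-aligned, as the BUG_ON checks above require.

  /* Illustrative sketch only -- not part of mm/page_isolation.c. */
  static int example_isolate_range(unsigned long start_pfn, unsigned long end_pfn)
  {
  	int ret;

  	/* Switch every pageblock in [start_pfn, end_pfn) to MIGRATE_ISOLATE. */
  	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
  	if (ret)
  		return ret;	/* some pageblock could not be isolated */

  	/* A real caller would migrate or reclaim busy pages here. */

  	/* 0 if every page in the range is free or isolated, -EBUSY otherwise. */
  	ret = test_pages_isolated(start_pfn, end_pfn);

  	/* This sketch always restores the original migrate type before returning. */
  	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
  	return ret;
  }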