mm/page_isolation.c

  /*
   * linux/mm/page_isolation.c
   */
  #include <linux/mm.h>
  #include <linux/page-isolation.h>
  #include <linux/pageblock-flags.h>
  #include "internal.h"
  
  /*
   * Return the first page in [pfn, pfn + nr_pages) whose pfn is valid,
   * or NULL if no pfn in that window is valid (i.e. the whole window
   * lies in a hole in the memory map).
   */
  static inline struct page *
  __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  {
  	int i;
  	for (i = 0; i < nr_pages; i++)
  		if (pfn_valid_within(pfn + i))
  			break;
  	if (unlikely(i == nr_pages))
  		return NULL;
  	return pfn_to_page(pfn + i);
  }
  
  /*
   * start_isolate_page_range() -- mark the pageblocks covering a range of
   * pages as MIGRATE_ISOLATE.
   * @start_pfn: The lower PFN of the range to be isolated.
   * @end_pfn: The upper PFN of the range to be isolated.
   *
   * Setting the page allocation type to MIGRATE_ISOLATE means that free
   * pages in the range will not be handed out by the allocator: pages that
   * are already free, and pages freed later, will not be allocated again.
   *
   * start_pfn/end_pfn must be aligned to pageblock_nr_pages.
   * Returns 0 on success and -EBUSY if any part of the range cannot be
   * isolated.
   */
  int
  start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
  {
  	unsigned long pfn;
  	unsigned long undo_pfn;
  	struct page *page;
  
  	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
  	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
  
  	for (pfn = start_pfn;
  	     pfn < end_pfn;
  	     pfn += pageblock_nr_pages) {
  		page = __first_valid_page(pfn, pageblock_nr_pages);
  		if (page && set_migratetype_isolate(page)) {
  			undo_pfn = pfn;
  			goto undo;
  		}
  	}
  	return 0;
  undo:
  	for (pfn = start_pfn;
  	     pfn < undo_pfn;
  	     pfn += pageblock_nr_pages)
  		unset_migratetype_isolate(pfn_to_page(pfn));
  
  	return -EBUSY;
  }
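
  /*
   * Illustrative sketch, not part of the original file: because of the
   * BUG_ON alignment checks above, a caller working with an arbitrary
   * [pfn, pfn + nr_pages) range is expected to round it out to pageblock
   * boundaries first. The caller name below is hypothetical; note that
   * rounding outward isolates some pages beyond the requested range.
   */
  #if 0
  static int example_isolate_unaligned(unsigned long pfn, unsigned long nr_pages)
  {
  	/* Round down/up to pageblock boundaries before isolating. */
  	unsigned long start_pfn = pfn & ~(pageblock_nr_pages - 1);
  	unsigned long end_pfn = (pfn + nr_pages + pageblock_nr_pages - 1) &
  				~(pageblock_nr_pages - 1);

  	return start_isolate_page_range(start_pfn, end_pfn);
  }
  #endif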
  
  /*
   * Make isolated pages available again.
   */
  int
  undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
  {
  	unsigned long pfn;
  	struct page *page;
  	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
  	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
  	for (pfn = start_pfn;
  	     pfn < end_pfn;
  	     pfn += pageblock_nr_pages) {
  		page = __first_valid_page(pfn, pageblock_nr_pages);
  		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
  			continue;
  		unset_migratetype_isolate(page);
  	}
  	return 0;
  }
  /*
   * Test whether all pages in the range are free (i.e. isolated).
   * All pages in [start_pfn...end_pfn) must be in the same zone.
   * zone->lock must be held before calling this.
   *
   * Returns 1 if all pages in the range are isolated.
   */
  static int
  __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
  {
  	struct page *page;
  
  	while (pfn < end_pfn) {
  		if (!pfn_valid_within(pfn)) {
  			pfn++;
  			continue;
  		}
  		page = pfn_to_page(pfn);
  		if (PageBuddy(page))
  			/* A free buddy chunk: skip the whole 2^order block. */
  			pfn += 1 << page_order(page);
  		else if (page_count(page) == 0 &&
  				page_private(page) == MIGRATE_ISOLATE)
  			/* A free page whose private field records MIGRATE_ISOLATE. */
  			pfn += 1;
  		else
  			/* Any other page means the range is not fully isolated. */
  			break;
  	}
  	if (pfn < end_pfn)
  		return 0;
  	return 1;
  }
  
  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
  {
  	unsigned long pfn, flags;
  	struct page *page;
  	struct zone *zone;
  	int ret;

  	/*
  	 * Note: a pageblock is not necessarily the same size as a MAX_ORDER
  	 * buddy chunk, so chunks of free pages are not necessarily aligned
  	 * to pageblock_nr_pages. Check the pageblock migratetype first.
  	 */
  	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  		page = __first_valid_page(pfn, pageblock_nr_pages);
  		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
  			break;
  	}
  	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
  	if ((pfn < end_pfn) || !page)
  		return -EBUSY;
  	/* Check that all pages are either free or marked MIGRATE_ISOLATE. */
  	zone = page_zone(page);
  	spin_lock_irqsave(&zone->lock, flags);
  	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
  	spin_unlock_irqrestore(&zone->lock, flags);
  	return ret ? 0 : -EBUSY;
  }
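
  /*
   * Illustrative sketch, not part of the original file: how a memory
   * hot-remove style caller might combine the entry points above. The
   * caller name and the elided migration step are assumptions made for
   * illustration only.
   */
  #if 0
  static int example_offline_range(unsigned long start_pfn, unsigned long end_pfn)
  {
  	int ret;

  	/* Mark every pageblock in the range MIGRATE_ISOLATE. */
  	ret = start_isolate_page_range(start_pfn, end_pfn);
  	if (ret)
  		return ret;	/* some pageblock could not be isolated */

  	/* ... migrate or free any pages still in use in the range ... */

  	/* Verify that every page is now free or marked isolated. */
  	ret = test_pages_isolated(start_pfn, end_pfn);

  	/* On failure (or once finished with the range), give the pages back. */
  	if (ret)
  		undo_isolate_page_range(start_pfn, end_pfn);
  	return ret;
  }
  #endif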