mm/page_ext.c

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/mm.h>
  #include <linux/mmzone.h>
  #include <linux/memblock.h>
  #include <linux/page_ext.h>
  #include <linux/memory.h>
  #include <linux/vmalloc.h>
  #include <linux/kmemleak.h>
  #include <linux/page_owner.h>
  #include <linux/page_idle.h>
  
/*
 * struct page extension
 *
 * This is the facility that manages memory for extended per-page data.
 *
 * Until now, storing extra data per page has required modifying struct page
 * itself, which means rebuilding the kernel, a really time-consuming process
 * that is sometimes impossible because of third-party module dependencies.
 * On top of that, enlarging struct page can cause unwanted changes in system
 * behaviour.
 *
 * This feature is intended to overcome those problems. It allocates memory
 * for the extended per-page data somewhere other than struct page itself,
 * and that memory is accessed through the accessor functions provided by
 * this code. During boot it checks whether this (potentially huge) chunk of
 * memory is actually needed and, if not, avoids allocating it at all. With
 * this advantage, the feature can be built into the kernel by default,
 * avoiding rebuilds and the problems described above.
 *
 * To make this work, there are two callbacks for clients. One is the need
 * callback, which is mandatory if the client wants to avoid useless memory
 * allocation at boot time. The other, the init callback, is optional and is
 * used to perform proper initialization after the memory has been allocated.
 *
 * The need callback decides whether the extended memory allocation is needed
 * at all. Some features may be deactivated for a given boot, making their
 * extra memory unnecessary. To avoid allocating a huge chunk of memory in
 * that case, each client declares its need for extra memory through the need
 * callback. If any need callback returns true, someone requires the extra
 * memory and the page extension core allocates it. If none of them return
 * true, the memory isn't needed for this boot and the core skips the
 * allocation entirely, so no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether extra memory is
 * requested through the size field in struct page_ext_operations. If it is
 * non-zero, extra space is allocated for each page_ext entry and its
 * location is reported back to the client through the offset field in
 * struct page_ext_operations.
 *
 * The init callback performs proper initialization once page extension is
 * completely set up. On sparse memory systems, the extra memory is allocated
 * some time after the memmap, i.e. the lifetime of page extension memory is
 * not the same as that of the memmap for struct page. Clients therefore
 * cannot store extra data until page extension is initialized, even though
 * pages may already be allocated and in use. This could leave the per-page
 * extra data in an inadequate state, so a client can use this callback to
 * initialize it correctly.
 */
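
/*
 * Purely as an illustration of the interface described above (not part of
 * this file): a hypothetical client, here called "page_foo" behind an
 * imaginary CONFIG_PAGE_FOO option, that wants one extra byte per page
 * could look roughly like this:
 *
 *	static bool need_page_foo(void)
 *	{
 *		return IS_ENABLED(CONFIG_PAGE_FOO);
 *	}
 *
 *	static void init_page_foo(void)
 *	{
 *		// page_ext memory now exists and is zeroed; set up any
 *		// initial per-page state here
 *	}
 *
 *	struct page_ext_operations page_foo_ops = {
 *		.size = sizeof(u8),
 *		.need = need_page_foo,
 *		.init = init_page_foo,
 *	};
 *
 * Once invoke_need_callbacks() has filled in ->offset, the client reaches
 * its byte via (void *)lookup_page_ext(page) + page_foo_ops.offset,
 * checking the lookup result for NULL first since the tables may not exist
 * yet.
 */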
  
  static struct page_ext_operations *page_ext_ops[] = {
  #ifdef CONFIG_PAGE_OWNER
  	&page_owner_ops,
  #endif
  #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
  	&page_idle_ops,
  #endif
  };
  unsigned long page_ext_size = sizeof(struct page_ext);
  static unsigned long total_usage;
  
  static bool __init invoke_need_callbacks(void)
  {
  	int i;
  	int entries = ARRAY_SIZE(page_ext_ops);
  	bool need = false;
  
  	for (i = 0; i < entries; i++) {
  		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
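			/* this client's extra space goes after what's reserved so far */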
  			page_ext_ops[i]->offset = page_ext_size;
  			page_ext_size += page_ext_ops[i]->size;
  			need = true;
  		}
  	}
  	return need;
  }
  
  static void __init invoke_init_callbacks(void)
  {
  	int i;
  	int entries = ARRAY_SIZE(page_ext_ops);
  
  	for (i = 0; i < entries; i++) {
  		if (page_ext_ops[i]->init)
  			page_ext_ops[i]->init();
  	}
  }
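
/*
 * Each page_ext entry occupies page_ext_size bytes: the base struct page_ext
 * plus the extra space requested by the enabled clients via ->size. Entries
 * are therefore addressed with byte arithmetic rather than array indexing.
 */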
  static inline struct page_ext *get_entry(void *base, unsigned long index)
  {
  	return base + page_ext_size * index;
  }
  #if !defined(CONFIG_SPARSEMEM)
  
  
  void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
  {
  	pgdat->node_page_ext = NULL;
  }
  struct page_ext *lookup_page_ext(const struct page *page)
  {
  	unsigned long pfn = page_to_pfn(page);
  	unsigned long index;
  	struct page_ext *base;
  
  	base = NODE_DATA(page_to_nid(page))->node_page_ext;
  	/*
  	 * The sanity checks the page allocator does upon freeing a
  	 * page can reach here before the page_ext arrays are
  	 * allocated when feeding a range of pages to the allocator
  	 * for the first time during bootup or memory hotplug.
  	 */
  	if (unlikely(!base))
  		return NULL;
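
	/*
	 * The table base corresponds to the node's start pfn rounded down to
	 * a MAX_ORDER_NR_PAGES boundary (see alloc_node_page_ext()), so the
	 * index is computed relative to that rounded-down base.
	 */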
  	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
  					MAX_ORDER_NR_PAGES);
  	return get_entry(base, index);
  }
  EXPORT_SYMBOL_GPL(lookup_page_ext);
  
  static int __init alloc_node_page_ext(int nid)
  {
  	struct page_ext *base;
  	unsigned long table_size;
  	unsigned long nr_pages;
  
  	nr_pages = NODE_DATA(nid)->node_spanned_pages;
  	if (!nr_pages)
  		return 0;
  
	/*
	 * Need extra space if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES. When the page allocator's buddy algorithm
	 * checks a buddy's status, the range can extend beyond the exact
	 * node range.
	 */
  	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
  		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
  		nr_pages += MAX_ORDER_NR_PAGES;
  	table_size = page_ext_size * nr_pages;

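	/* keep the allocation above MAX_DMA_ADDRESS, preferably on this node */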
  	base = memblock_alloc_try_nid(
  			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
  			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
  	if (!base)
  		return -ENOMEM;
  	NODE_DATA(nid)->node_page_ext = base;
  	total_usage += table_size;
  	return 0;
  }
  
  void __init page_ext_init_flatmem(void)
  {
  
  	int nid, fail;
  
  	if (!invoke_need_callbacks())
  		return;
  
  	for_each_online_node(nid)  {
  		fail = alloc_node_page_ext(nid);
  		if (fail)
  			goto fail;
  	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
  	invoke_init_callbacks();
  	return;
  
  fail:
	pr_crit("allocation of page_ext failed.\n");
  	panic("Out of memory");
  }
  
  #else /* CONFIG_FLAT_NODE_MEM_MAP */
  struct page_ext *lookup_page_ext(const struct page *page)
  {
  	unsigned long pfn = page_to_pfn(page);
  	struct mem_section *section = __pfn_to_section(pfn);
  	/*
  	 * The sanity checks the page allocator does upon freeing a
  	 * page can reach here before the page_ext arrays are
  	 * allocated when feeding a range of pages to the allocator
  	 * for the first time during bootup or memory hotplug.
  	 */
  	if (!section->page_ext)
  		return NULL;
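
	/*
	 * section->page_ext is stored pre-shifted by the section's start pfn
	 * (see init_section_page_ext()), so the absolute pfn indexes directly.
	 */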
  	return get_entry(section->page_ext, pfn);
  }
  EXPORT_SYMBOL_GPL(lookup_page_ext);
  
  static void *__meminit alloc_page_ext(size_t size, int nid)
  {
  	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
  	void *addr = NULL;
  
  	addr = alloc_pages_exact_nid(nid, size, flags);
  	if (addr) {
  		kmemleak_alloc(addr, size, 1, flags);
  		return addr;
  	}
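
	/* physically contiguous allocation failed; fall back to vmalloc space */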
  	addr = vzalloc_node(size, nid);
  
  	return addr;
  }
  
  static int __meminit init_section_page_ext(unsigned long pfn, int nid)
  {
  	struct mem_section *section;
  	struct page_ext *base;
  	unsigned long table_size;
  
  	section = __pfn_to_section(pfn);
  
  	if (section->page_ext)
  		return 0;
  	table_size = page_ext_size * PAGES_PER_SECTION;
  	base = alloc_page_ext(table_size, nid);
  
  	/*
	 * The value stored in section->page_ext is (base - page_ext_size * pfn)
  	 * and it does not point to the memory block allocated above,
  	 * causing kmemleak false positives.
  	 */
  	kmemleak_not_leak(base);
  
  	if (!base) {
		pr_err("page ext allocation failure\n");
  		return -ENOMEM;
  	}
  
  	/*
  	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
  	 * we need to apply a mask.
  	 */
  	pfn &= PAGE_SECTION_MASK;
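	/*
	 * Subtracting page_ext_size * pfn stores a virtually shifted base, so
	 * that get_entry(section->page_ext, pfn) can take an absolute pfn and
	 * still land on the entry for the corresponding page in this section.
	 */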
  	section->page_ext = (void *)base - page_ext_size * pfn;
  	total_usage += table_size;
  	return 0;
  }
  #ifdef CONFIG_MEMORY_HOTPLUG
  static void free_page_ext(void *addr)
  {
  	if (is_vmalloc_addr(addr)) {
  		vfree(addr);
  	} else {
  		struct page *page = virt_to_page(addr);
  		size_t table_size;
  		table_size = page_ext_size * PAGES_PER_SECTION;
  
  		BUG_ON(PageReserved(page));
  		kmemleak_free(addr);
  		free_pages_exact(addr, table_size);
  	}
  }
  
  static void __free_page_ext(unsigned long pfn)
  {
  	struct mem_section *ms;
  	struct page_ext *base;
  
  	ms = __pfn_to_section(pfn);
  	if (!ms || !ms->page_ext)
  		return;
  	base = get_entry(ms->page_ext, pfn);
  	free_page_ext(base);
  	ms->page_ext = NULL;
  }
  
  static int __meminit online_page_ext(unsigned long start_pfn,
  				unsigned long nr_pages,
  				int nid)
  {
  	unsigned long start, end, pfn;
  	int fail = 0;
  
  	start = SECTION_ALIGN_DOWN(start_pfn);
  	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
  	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * The "start_pfn" passed to us is a pfn which was an argument to
		 * online_pages(), so it should exist.
		 */
  		nid = pfn_to_nid(start_pfn);
  		VM_BUG_ON(!node_state(nid, N_ONLINE));
  	}
  	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
  		fail = init_section_page_ext(pfn, nid);
  	if (!fail)
  		return 0;
  
  	/* rollback */
  	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
  		__free_page_ext(pfn);
  
  	return -ENOMEM;
  }
  
  static int __meminit offline_page_ext(unsigned long start_pfn,
  				unsigned long nr_pages, int nid)
  {
  	unsigned long start, end, pfn;
  
  	start = SECTION_ALIGN_DOWN(start_pfn);
  	end = SECTION_ALIGN_UP(start_pfn + nr_pages);
  
  	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
  		__free_page_ext(pfn);
  	return 0;
  
  }
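
/*
 * Memory hotplug notifier: allocate the per-section page_ext tables while a
 * range is going online, and free them again if onlining is cancelled or the
 * range goes offline.
 */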
  
  static int __meminit page_ext_callback(struct notifier_block *self,
  			       unsigned long action, void *arg)
  {
  	struct memory_notify *mn = arg;
  	int ret = 0;
  
  	switch (action) {
  	case MEM_GOING_ONLINE:
  		ret = online_page_ext(mn->start_pfn,
  				   mn->nr_pages, mn->status_change_nid);
  		break;
  	case MEM_OFFLINE:
  		offline_page_ext(mn->start_pfn,
  				mn->nr_pages, mn->status_change_nid);
  		break;
  	case MEM_CANCEL_ONLINE:
  		offline_page_ext(mn->start_pfn,
  				mn->nr_pages, mn->status_change_nid);
  		break;
  	case MEM_GOING_OFFLINE:
  		break;
  	case MEM_ONLINE:
  	case MEM_CANCEL_OFFLINE:
  		break;
  	}
  
  	return notifier_from_errno(ret);
  }
  
  #endif
  
  void __init page_ext_init(void)
  {
  	unsigned long pfn;
  	int nid;
  
  	if (!invoke_need_callbacks())
  		return;
  
  	for_each_node_state(nid, N_MEMORY) {
  		unsigned long start_pfn, end_pfn;
  
  		start_pfn = node_start_pfn(nid);
  		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION, and the
		 * page->flags of out-of-node pages are not initialized.  So we
		 * scan [start_pfn, the last section boundary below end_pfn)
		 * here, one section at a time.
		 */
  		for (pfn = start_pfn; pfn < end_pfn;
  			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
  
  			if (!pfn_valid(pfn))
  				continue;
			/*
			 * Nodes' pfn ranges can overlap.
			 * We know some architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 |....
			 */
  			if (pfn_to_nid(pfn) != nid)
  				continue;
  			if (init_section_page_ext(pfn, nid))
  				goto oom;
  			cond_resched();
  		}
  	}
  	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
  	invoke_init_callbacks();
  	return;
  
  oom:
  	panic("Out of memory");
  }
  
  void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
  {
  }
  
  #endif