  /*
   * Copyright (C) 2008, 2009 Intel Corporation
   * Authors: Andi Kleen, Fengguang Wu
   *
   * This software may be redistributed and/or modified under the terms of
   * the GNU General Public License ("GPL") version 2 only as published by the
   * Free Software Foundation.
   *
   * High level machine check handler. Handles pages reported by the
   * hardware as being corrupted usually due to a multi-bit ECC memory or cache
   * failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping the
 * use of not-yet-corrupted but suspicious pages without killing anything.
   *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures can happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, i.e. it takes the standard locks, even if
 * that means the error handling takes potentially a long time.
 *
 * Several operations here have worse than linear complexity, because
 * the VM data structures are not well suited to the task. For example
 * the operation to map back from RMAP chains to processes has to walk
 * the complete process list, i.e. its cost grows with the number of
 * processes in the system. But since memory corruptions are rare we
 * hope to get away with this. This avoids impacting the core VM.
   */
  
  /*
   * Notebook:
   * - hugetlb needs more code
   * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
   * - pass bad pages to kdump next kernel
   */
  #include <linux/kernel.h>
  #include <linux/mm.h>
  #include <linux/page-flags.h>
  #include <linux/kernel-page-flags.h>
  #include <linux/sched.h>
  #include <linux/ksm.h>
  #include <linux/rmap.h>
  #include <linux/pagemap.h>
  #include <linux/swap.h>
  #include <linux/backing-dev.h>
  #include <linux/migrate.h>
  #include <linux/page-isolation.h>
  #include <linux/suspend.h>
  #include <linux/slab.h>
  #include <linux/swapops.h>
  #include <linux/hugetlb.h>
  #include <linux/memory_hotplug.h>
  #include "internal.h"
  
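/*
 * These knobs reach user space as the vm.memory_failure_early_kill and
 * vm.memory_failure_recovery sysctls (wired up in kernel/sysctl.c):
 * early_kill selects whether mapping processes are killed as soon as the
 * corruption is found rather than on first access, and recovery == 0
 * turns every uncorrected error into a panic (see __memory_failure()).
 */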
  int sysctl_memory_failure_early_kill __read_mostly = 0;
  
  int sysctl_memory_failure_recovery __read_mostly = 1;
  
  atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
  #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
  u32 hwpoison_filter_enable = 0;
  u32 hwpoison_filter_dev_major = ~0U;
  u32 hwpoison_filter_dev_minor = ~0U;
  u64 hwpoison_filter_flags_mask;
  u64 hwpoison_filter_flags_value;
  EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
  EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
  EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
  EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
  EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
  
  static int hwpoison_filter_dev(struct page *p)
  {
  	struct address_space *mapping;
  	dev_t dev;
  
  	if (hwpoison_filter_dev_major == ~0U &&
  	    hwpoison_filter_dev_minor == ~0U)
  		return 0;
  
  	/*
  	 * page_mapping() does not accept slab pages.
  	 */
  	if (PageSlab(p))
  		return -EINVAL;
  
  	mapping = page_mapping(p);
  	if (mapping == NULL || mapping->host == NULL)
  		return -EINVAL;
  
  	dev = mapping->host->i_sb->s_dev;
  	if (hwpoison_filter_dev_major != ~0U &&
  	    hwpoison_filter_dev_major != MAJOR(dev))
  		return -EINVAL;
  	if (hwpoison_filter_dev_minor != ~0U &&
  	    hwpoison_filter_dev_minor != MINOR(dev))
  		return -EINVAL;
  
  	return 0;
  }
  static int hwpoison_filter_flags(struct page *p)
  {
  	if (!hwpoison_filter_flags_mask)
  		return 0;
  
  	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
  				    hwpoison_filter_flags_value)
  		return 0;
  	else
  		return -EINVAL;
  }
  /*
   * This allows stress tests to limit test scope to a collection of tasks
   * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (e.g. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
   * can only guarantee that the page either belongs to the memcg tasks, or is
   * a freed page.
   */
  #ifdef	CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  u64 hwpoison_filter_memcg;
  EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
  static int hwpoison_filter_task(struct page *p)
  {
  	struct mem_cgroup *mem;
  	struct cgroup_subsys_state *css;
  	unsigned long ino;
  
  	if (!hwpoison_filter_memcg)
  		return 0;
  
  	mem = try_get_mem_cgroup_from_page(p);
  	if (!mem)
  		return -EINVAL;
  
  	css = mem_cgroup_css(mem);
  	/* root_mem_cgroup has NULL dentries */
  	if (!css->cgroup->dentry)
  		return -EINVAL;
  
  	ino = css->cgroup->dentry->d_inode->i_ino;
  	css_put(css);
  
  	if (ino != hwpoison_filter_memcg)
  		return -EINVAL;
  
  	return 0;
  }
  #else
  static int hwpoison_filter_task(struct page *p) { return 0; }
  #endif
  int hwpoison_filter(struct page *p)
  {
  	if (!hwpoison_filter_enable)
  		return 0;
  	if (hwpoison_filter_dev(p))
  		return -EINVAL;
  	if (hwpoison_filter_flags(p))
  		return -EINVAL;
  	if (hwpoison_filter_task(p))
  		return -EINVAL;
  	return 0;
  }
  #else
  int hwpoison_filter(struct page *p)
  {
  	return 0;
  }
  #endif
  EXPORT_SYMBOL_GPL(hwpoison_filter);
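
/*
 * Illustrative only: a minimal user space sketch for driving the filter,
 * assuming the debugfs files exported by the companion hwpoison-inject
 * module (mm/hwpoison-inject.c) under /sys/kernel/debug/hwpoison/;
 * double-check the names against your tree.
 *
 *	#include <stdio.h>
 *
 *	static void write_knob(const char *path, const char *val)
 *	{
 *		FILE *f = fopen(path, "w");
 *		if (f) {
 *			fputs(val, f);
 *			fclose(f);
 *		}
 *	}
 *
 *	// Limit injection to one block device, then enable filtering.
 *	static void hwpoison_filter_setup(void)
 *	{
 *		write_knob("/sys/kernel/debug/hwpoison/corrupt-filter-dev-major", "8");
 *		write_knob("/sys/kernel/debug/hwpoison/corrupt-filter-dev-minor", "0");
 *		write_knob("/sys/kernel/debug/hwpoison/corrupt-filter-enable", "1");
 *	}
 */
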
  /*
   * Send all the processes who have the page mapped an ``action optional''
   * signal.
   */
  static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
  			unsigned long pfn, struct page *page)
  {
  	struct siginfo si;
  	int ret;
  
	printk(KERN_ERR
		"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
  	si.si_signo = SIGBUS;
  	si.si_errno = 0;
  	si.si_code = BUS_MCEERR_AO;
  	si.si_addr = (void *)addr;
  #ifdef __ARCH_SI_TRAPNO
  	si.si_trapno = trapno;
  #endif
  	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
  	/*
  	 * Don't use force here, it's convenient if the signal
  	 * can be temporarily blocked.
  	 * This could cause a loop when the user sets SIGBUS
	 * to SIG_IGN, but hopefully no one will do that?
  	 */
  	ret = send_sig_info(SIGBUS, &si, t);  /* synchronous? */
  	if (ret < 0)
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
  	return ret;
  }
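
/*
 * For reference, a sketch of what a recovery-aware process can do with the
 * BUS_MCEERR_AO signal sent above; this is plain POSIX signal handling plus
 * the siginfo si_addr_lsb extension, not kernel code.
 *
 *	#include <signal.h>
 *
 *	static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO) {
 *			// si->si_addr points into the poisoned mapping and
 *			// (1UL << si->si_addr_lsb) gives the granularity;
 *			// the process may drop or reconstruct that data
 *			// instead of dying.
 *		}
 *	}
 *
 *	// Installed with sigaction(), sa_flags = SA_SIGINFO.
 */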
  
  /*
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can handle.
   */
  void shake_page(struct page *p, int access)
  {
  	if (!PageSlab(p)) {
  		lru_add_drain_all();
  		if (PageLRU(p))
  			return;
  		drain_all_pages();
  		if (PageLRU(p) || is_free_buddy_page(p))
  			return;
  	}

  	/*
	 * Only call shrink_slab here (which would also
  	 * shrink other caches) if access is not potentially fatal.
  	 */
  	if (access) {
  		int nr;
  		do {
  			nr = shrink_slab(1000, GFP_KERNEL, 1000);
  			if (page_count(p) == 1)
  				break;
  		} while (nr > 10);
  	}
  }
  EXPORT_SYMBOL_GPL(shake_page);
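
/*
 * In this file, memory_failure() shakes with access == 0 because the page
 * really is corrupted and extra accesses are risky, while
 * soft_offline_page() passes access == 1 since its page is still good.
 */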
  
  /*
   * Kill all processes that have a poisoned page mapped and then isolate
   * the page.
   *
   * General strategy:
   * Find all processes having the page mapped and kill them.
   * But we keep a page reference around so that the page is not
   * actually freed yet.
   * Then stash the page away
   *
   * There's no convenient way to get back to mapped processes
   * from the VMAs. So do a brute-force search over all
   * running processes.
   *
   * Remember that machine checks are not common (or rather
   * if they are common you have other problems), so this shouldn't
   * be a performance issue.
   *
   * Also there are some races possible while we get from the
   * error detection to actually handle it.
   */
  
  struct to_kill {
  	struct list_head nd;
  	struct task_struct *tsk;
  	unsigned long addr;
  	char addr_valid;
  };
  
  /*
   * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
   */
  
  /*
   * Schedule a process for later kill.
   * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
   * TBD would GFP_NOIO be enough?
   */
  static void add_to_kill(struct task_struct *tsk, struct page *p,
  		       struct vm_area_struct *vma,
  		       struct list_head *to_kill,
  		       struct to_kill **tkc)
  {
  	struct to_kill *tk;
  
  	if (*tkc) {
  		tk = *tkc;
  		*tkc = NULL;
  	} else {
  		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
  		if (!tk) {
			printk(KERN_ERR
		"MCE: Out of memory while machine check handling\n");
  			return;
  		}
  	}
  	tk->addr = page_address_in_vma(p, vma);
  	tk->addr_valid = 1;
  
  	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could also be a mremap. Since that's
	 * likely very rare, kill anyway just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
  	 */
  	if (tk->addr == -EFAULT) {
  		pr_info("MCE: Unable to find user space address %lx in %s
  ",
  			page_to_pfn(p), tsk->comm);
  		tk->addr_valid = 0;
  	}
  	get_task_struct(tsk);
  	tk->tsk = tsk;
  	list_add_tail(&tk->nd, to_kill);
  }
  
  /*
   * Kill the processes that have been collected earlier.
   *
   * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
   * Also when FAIL is set do a force kill because something went
   * wrong earlier.
   */
  static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
  			  int fail, struct page *page, unsigned long pfn)
  {
  	struct to_kill *tk, *next;
  
  	list_for_each_entry_safe (tk, next, to_kill, nd) {
  		if (doit) {
  			/*
  			 * In case something went wrong with munmapping
  			 * make sure the process doesn't catch the
  			 * signal and then access the memory. Just kill it.
  			 */
  			if (fail || tk->addr_valid == 0) {
				printk(KERN_ERR
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
  				force_sig(SIGKILL, tk->tsk);
  			}
  
  			/*
  			 * In theory the process could have mapped
  			 * something else on the address in-between. We could
  			 * check for that, but we need to tell the
  			 * process anyways.
  			 */
  			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
  					      pfn, page) < 0)
				printk(KERN_ERR
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
  		}
  		put_task_struct(tk->tsk);
  		kfree(tk);
  	}
  }
  
  static int task_early_kill(struct task_struct *tsk)
  {
  	if (!tsk->mm)
  		return 0;
  	if (tsk->flags & PF_MCE_PROCESS)
  		return !!(tsk->flags & PF_MCE_EARLY);
  	return sysctl_memory_failure_early_kill;
  }
  
  /*
   * Collect processes when the error hit an anonymous page.
   */
  static void collect_procs_anon(struct page *page, struct list_head *to_kill,
  			      struct to_kill **tkc)
  {
  	struct vm_area_struct *vma;
  	struct task_struct *tsk;
  	struct anon_vma *av;
  
  	read_lock(&tasklist_lock);
  	av = page_lock_anon_vma(page);
  	if (av == NULL)	/* Not actually mapped anymore */
  		goto out;
  	for_each_process (tsk) {
  		struct anon_vma_chain *vmac;
  		if (!task_early_kill(tsk))
  			continue;
  		list_for_each_entry(vmac, &av->head, same_anon_vma) {
  			vma = vmac->vma;
  			if (!page_mapped_in_vma(page, vma))
  				continue;
  			if (vma->vm_mm == tsk->mm)
  				add_to_kill(tsk, page, vma, to_kill, tkc);
  		}
  	}
  	page_unlock_anon_vma(av);
  out:
  	read_unlock(&tasklist_lock);
  }
  
  /*
   * Collect processes when the error hit a file mapped page.
   */
  static void collect_procs_file(struct page *page, struct list_head *to_kill,
  			      struct to_kill **tkc)
  {
  	struct vm_area_struct *vma;
  	struct task_struct *tsk;
  	struct prio_tree_iter iter;
  	struct address_space *mapping = page->mapping;
  
  	/*
  	 * A note on the locking order between the two locks.
  	 * We don't rely on this particular order.
  	 * If you have some other code that needs a different order
  	 * feel free to switch them around. Or add a reverse link
  	 * from mm_struct to task_struct, then this could be all
  	 * done without taking tasklist_lock and looping over all tasks.
  	 */
  
  	read_lock(&tasklist_lock);
  	spin_lock(&mapping->i_mmap_lock);
  	for_each_process(tsk) {
  		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  
  		if (!task_early_kill(tsk))
  			continue;
  
  		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
  				      pgoff) {
  			/*
  			 * Send early kill signal to tasks where a vma covers
  			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
  			 * Assume applications who requested early kill want
  			 * to be informed of all such data corruptions.
  			 */
  			if (vma->vm_mm == tsk->mm)
  				add_to_kill(tsk, page, vma, to_kill, tkc);
  		}
  	}
  	spin_unlock(&mapping->i_mmap_lock);
  	read_unlock(&tasklist_lock);
  }
  
  /*
   * Collect the processes who have the corrupted page mapped to kill.
   * This is done in two steps for locking reasons.
   * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
   */
  static void collect_procs(struct page *page, struct list_head *tokill)
  {
  	struct to_kill *tk;
  
  	if (!page->mapping)
  		return;
  
  	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
  	if (!tk)
  		return;
  	if (PageAnon(page))
  		collect_procs_anon(page, tokill, &tk);
  	else
  		collect_procs_file(page, tokill, &tk);
  	kfree(tk);
  }
  
  /*
   * Error handlers for various types of pages.
   */
  
  enum outcome {
  	IGNORED,	/* Error: cannot be handled */
  	FAILED,		/* Error: handling failed */
  	DELAYED,	/* Will be handled later */
  	RECOVERED,	/* Successfully recovered */
  };
  
  static const char *action_name[] = {
  	[IGNORED] = "Ignored",
  	[FAILED] = "Failed",
  	[DELAYED] = "Delayed",
  	[RECOVERED] = "Recovered",
  };
  
  /*
   * XXX: It is possible that a page is isolated from LRU cache,
   * and then kept in swap cache or failed to remove from page cache.
   * The page count will stop it from being freed by unpoison.
   * Stress tests should be aware of this memory leak problem.
   */
  static int delete_from_lru_cache(struct page *p)
  {
  	if (!isolate_lru_page(p)) {
  		/*
  		 * Clear sensible page flags, so that the buddy system won't
  		 * complain when the page is unpoison-and-freed.
  		 */
  		ClearPageActive(p);
  		ClearPageUnevictable(p);
  		/*
  		 * drop the page count elevated by isolate_lru_page()
  		 */
  		page_cache_release(p);
  		return 0;
  	}
  	return -EIO;
  }
  
  /*
   * Error hit kernel page.
 * Do nothing; try to be lucky and not touch it. For a few cases we
   * could be more sophisticated.
   */
  static int me_kernel(struct page *p, unsigned long pfn)
  {
  	return IGNORED;
  }
  
  /*
   * Page in unknown state. Do nothing.
   */
  static int me_unknown(struct page *p, unsigned long pfn)
  {
  	printk(KERN_ERR "MCE %#lx: Unknown page state
  ", pfn);
  	return FAILED;
  }
  
  /*
   * Clean (or cleaned) page cache page.
   */
  static int me_pagecache_clean(struct page *p, unsigned long pfn)
  {
  	int err;
  	int ret = FAILED;
  	struct address_space *mapping;
  	delete_from_lru_cache(p);
  	/*
	 * For anonymous pages we're done; the only reference left
  	 * should be the one m_f() holds.
  	 */
  	if (PageAnon(p))
  		return RECOVERED;
  
  	/*
  	 * Now truncate the page in the page cache. This is really
  	 * more like a "temporary hole punch"
  	 * Don't do this for block devices when someone else
  	 * has a reference, because it could be file system metadata
  	 * and that's not safe to truncate.
  	 */
  	mapping = page_mapping(p);
  	if (!mapping) {
  		/*
		 * Page has been torn down in the meantime
  		 */
  		return FAILED;
  	}
  
  	/*
  	 * Truncation is a bit tricky. Enable it per file system for now.
  	 *
  	 * Open: to take i_mutex or not for this? Right now we don't.
  	 */
  	if (mapping->a_ops->error_remove_page) {
  		err = mapping->a_ops->error_remove_page(mapping, p);
  		if (err != 0) {
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
					pfn, err);
  		} else if (page_has_private(p) &&
  				!try_to_release_page(p, GFP_NOIO)) {
  			pr_info("MCE %#lx: failed to release buffers
  ", pfn);
  		} else {
  			ret = RECOVERED;
  		}
  	} else {
  		/*
  		 * If the file system doesn't support it just invalidate
  		 * This fails on dirty or anything with private pages
  		 */
  		if (invalidate_inode_page(p))
  			ret = RECOVERED;
  		else
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
				pfn);
  	}
  	return ret;
  }
  
  /*
 * Dirty page cache page.
 * Issues: when the error hits a hole page the error is not properly
   * propagated.
   */
  static int me_pagecache_dirty(struct page *p, unsigned long pfn)
  {
  	struct address_space *mapping = page_mapping(p);
  
  	SetPageError(p);
  	/* TBD: print more information about the file. */
  	if (mapping) {
  		/*
  		 * IO error will be reported by write(), fsync(), etc.
		 * which check the mapping.
  		 * This way the application knows that something went
  		 * wrong with its dirty file data.
  		 *
  		 * There's one open issue:
  		 *
  		 * The EIO will be only reported on the next IO
  		 * operation and then cleared through the IO map.
  		 * Normally Linux has two mechanisms to pass IO error
  		 * first through the AS_EIO flag in the address space
  		 * and then through the PageError flag in the page.
  		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
  		 *
  		 * This has the disadvantage that it gets cleared on
  		 * the first operation that returns an error, while
  		 * the PageError bit is more sticky and only cleared
  		 * when the page is reread or dropped.  If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before,
		 * and the page is dropped in between, then the error
  		 * will not be properly reported.
  		 *
  		 * This can already happen even without hwpoisoned
  		 * pages: first on metadata IO errors (which only
  		 * report through AS_EIO) or when the page is dropped
  		 * at the wrong time.
  		 *
  		 * So right now we assume that the application DTRT on
  		 * the first EIO, but we're not worse than other parts
  		 * of the kernel.
  		 */
  		mapping_set_error(mapping, EIO);
  	}
  
  	return me_pagecache_clean(p, pfn);
  }
  
  /*
   * Clean and dirty swap cache.
   *
   * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. page is freshly swapped in). So it could be
   * referenced concurrently by 2 types of PTEs:
   * normal PTEs and swap PTEs. We try to handle them consistently by calling
   * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
   * and then
   *      - clear dirty bit to prevent IO
   *      - remove from LRU
   *      - but keep in the swap cache, so that when we return to it on
   *        a later page fault, we know the application is accessing
   *        corrupted data and shall be killed (we installed simple
   *        interception code in do_swap_page to catch it).
   *
   * Clean swap cache pages can be directly isolated. A later page fault will
   * bring in the known good data from disk.
   */
  static int me_swapcache_dirty(struct page *p, unsigned long pfn)
  {
  	ClearPageDirty(p);
  	/* Trigger EIO in shmem: */
  	ClearPageUptodate(p);
  	if (!delete_from_lru_cache(p))
  		return DELAYED;
  	else
  		return FAILED;
  }
  
  static int me_swapcache_clean(struct page *p, unsigned long pfn)
  {
  	delete_from_swap_cache(p);

  	if (!delete_from_lru_cache(p))
  		return RECOVERED;
  	else
  		return FAILED;
  }
  
  /*
   * Huge pages. Needs work.
   * Issues:
   * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
   *   To narrow down kill region to one page, we need to break up pmd.
   */
  static int me_huge_page(struct page *p, unsigned long pfn)
  {
  	int res = 0;
  	struct page *hpage = compound_head(p);
  	/*
  	 * We can safely recover from error on free or reserved (i.e.
  	 * not in-use) hugepage by dequeuing it from freelist.
  	 * To check whether a hugepage is in-use or not, we can't use
  	 * page->lru because it can be used in other hugepage operations,
  	 * such as __unmap_hugepage_range() and gather_surplus_pages().
  	 * So instead we use page_mapping() and PageAnon().
  	 * We assume that this function is called with page lock held,
  	 * so there is no race between isolation and mapping/unmapping.
  	 */
  	if (!(page_mapping(hpage) || PageAnon(hpage))) {
  		res = dequeue_hwpoisoned_huge_page(hpage);
  		if (!res)
  			return RECOVERED;
  	}
  	return DELAYED;
  }
  
  /*
   * Various page states we can handle.
   *
   * A page state is defined by its current page->flags bits.
   * The table matches them in order and calls the right handler.
   *
 * This is quite tricky because we can access a page at any time
 * in its life cycle, so all accesses have to be extremely careful.
   *
   * This is not complete. More states could be added.
   * For any missing state don't attempt recovery.
   */
  
  #define dirty		(1UL << PG_dirty)
  #define sc		(1UL << PG_swapcache)
  #define unevict		(1UL << PG_unevictable)
  #define mlock		(1UL << PG_mlocked)
  #define writeback	(1UL << PG_writeback)
  #define lru		(1UL << PG_lru)
  #define swapbacked	(1UL << PG_swapbacked)
  #define head		(1UL << PG_head)
  #define tail		(1UL << PG_tail)
  #define compound	(1UL << PG_compound)
  #define slab		(1UL << PG_slab)
  #define reserved	(1UL << PG_reserved)
  
  static struct page_state {
  	unsigned long mask;
  	unsigned long res;
  	char *msg;
  	int (*action)(struct page *p, unsigned long pfn);
  } error_states[] = {
  	{ reserved,	reserved,	"reserved kernel",	me_kernel },
  	/*
  	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make up a small fraction of all free pages.
  	 */
  
  	/*
  	 * Could in theory check if slab page is free or if we can drop
  	 * currently unused objects without touching them. But just
  	 * treat it as standard kernel for now.
  	 */
  	{ slab,		slab,		"kernel slab",	me_kernel },
  
  #ifdef CONFIG_PAGEFLAGS_EXTENDED
  	{ head,		head,		"huge",		me_huge_page },
  	{ tail,		tail,		"huge",		me_huge_page },
  #else
  	{ compound,	compound,	"huge",		me_huge_page },
  #endif
  
  	{ sc|dirty,	sc|dirty,	"swapcache",	me_swapcache_dirty },
  	{ sc|dirty,	sc,		"swapcache",	me_swapcache_clean },
  
  	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty},
  	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean},
  	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
  	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },
  
  	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
  	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },
  
  	/*
  	 * Catchall entry: must be at end.
  	 */
  	{ 0,		0,		"unknown page state",	me_unknown },
  };
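
/*
 * Worked example of the matching rule: a dirty page on the LRU has
 * (page->flags & (lru|dirty)) == (lru|dirty) and stops at the
 * me_pagecache_dirty entry; once cleaned, the same mask yields just lru
 * and the me_pagecache_clean line fires instead. Order therefore matters:
 * more specific states must precede more general ones.
 */
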
  #undef dirty
  #undef sc
  #undef unevict
  #undef mlock
  #undef writeback
  #undef lru
  #undef swapbacked
  #undef head
  #undef tail
  #undef compound
  #undef slab
  #undef reserved
  static void action_result(unsigned long pfn, char *msg, int result)
  {
  	struct page *page = pfn_to_page(pfn);
  
  	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s
  ",
  		pfn,
  		PageDirty(page) ? "dirty " : "",
  		msg, action_name[result]);
  }
  
  static int page_action(struct page_state *ps, struct page *p,
  			unsigned long pfn)
  {
  	int result;
  	int count;
  
  	result = ps->action(p, pfn);
  	action_result(pfn, ps->msg, result);

  	count = page_count(p) - 1;
  	if (ps->action == me_swapcache_dirty && result == DELAYED)
  		count--;
  	if (count != 0) {
		printk(KERN_ERR
		       "MCE %#lx: %s page still referenced by %d users\n",
  		       pfn, ps->msg, count);
  		result = FAILED;
  	}
  
  	/* Could do more checks here if page looks ok */
  	/*
  	 * Could adjust zone counters here to correct for the missing page.
  	 */
  	return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
  }
  /*
   * Do all that is necessary to remove user space mappings. Unmap
   * the pages and send SIGBUS to the processes if the data was dirty.
   */
  static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
  				  int trapno)
  {
  	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
  	struct address_space *mapping;
  	LIST_HEAD(tokill);
  	int ret;
  	int kill = 1;
  	struct page *hpage = compound_head(p);

  	if (PageReserved(p) || PageSlab(p))
  		return SWAP_SUCCESS;

  	/*
  	 * This check implies we don't kill processes if their pages
  	 * are in the swap cache early. Those are always late kills.
  	 */
  	if (!page_mapped(hpage))
  		return SWAP_SUCCESS;
  	if (PageKsm(p))
  		return SWAP_FAIL;
  
  	if (PageSwapCache(p)) {
		printk(KERN_ERR
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
  		ttu |= TTU_IGNORE_HWPOISON;
  	}
  
  	/*
  	 * Propagate the dirty bit from PTEs to struct page first, because we
  	 * need this to decide if we should kill or just drop the page.
  	 * XXX: the dirty test could be racy: set_page_dirty() may not always
  	 * be called inside page lock (it's recommended but not enforced).
  	 */
  	mapping = page_mapping(hpage);
  	if (!PageDirty(hpage) && mapping &&
  	    mapping_cap_writeback_dirty(mapping)) {
  		if (page_mkclean(hpage)) {
  			SetPageDirty(hpage);
  		} else {
  			kill = 0;
  			ttu |= TTU_IGNORE_HWPOISON;
			printk(KERN_INFO
	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
  		}
  	}
  
  	/*
  	 * First collect all the processes that have the page
  	 * mapped in dirty form.  This has to be done before try_to_unmap,
  	 * because ttu takes the rmap data structures down.
  	 *
  	 * Error handling: We ignore errors here because
  	 * there's nothing that can be done.
  	 */
  	if (kill)
  		collect_procs(hpage, &tokill);

  	ret = try_to_unmap(hpage, ttu);
  	if (ret != SWAP_SUCCESS)
  		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)
  ",
  				pfn, page_mapcount(hpage));
  
  	/*
  	 * Now that the dirty bit has been propagated to the
  	 * struct page and all unmaps done we can decide if
  	 * killing is needed or not.  Only kill when the page
  	 * was dirty, otherwise the tokill list is merely
  	 * freed.  When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
  	 * any accesses to the poisoned memory.
  	 */
  	kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
  		      ret != SWAP_SUCCESS, p, pfn);
  
  	return ret;
  }
  static void set_page_hwpoison_huge_page(struct page *hpage)
  {
  	int i;
  	int nr_pages = 1 << compound_order(hpage);
  	for (i = 0; i < nr_pages; i++)
  		SetPageHWPoison(hpage + i);
  }
  
  static void clear_page_hwpoison_huge_page(struct page *hpage)
  {
  	int i;
  	int nr_pages = 1 << compound_order(hpage);
  	for (i = 0; i < nr_pages; i++)
  		ClearPageHWPoison(hpage + i);
  }
  int __memory_failure(unsigned long pfn, int trapno, int flags)
  {
  	struct page_state *ps;
  	struct page *p;
  	struct page *hpage;
  	int res;
  	unsigned int nr_pages;
  
  	if (!sysctl_memory_failure_recovery)
  		panic("Memory failure from trap %d on page %lx", trapno, pfn);
  
  	if (!pfn_valid(pfn)) {
		printk(KERN_ERR
		       "MCE %#lx: memory outside kernel control\n",
		       pfn);
  		return -ENXIO;
  	}
  
  	p = pfn_to_page(pfn);
  	hpage = compound_head(p);
  	if (TestSetPageHWPoison(p)) {
  		printk(KERN_ERR "MCE %#lx: already hardware poisoned
  ", pfn);
  		return 0;
  	}
  	nr_pages = 1 << compound_order(hpage);
  	atomic_long_add(nr_pages, &mce_bad_pages);
  
  	/*
  	 * We need/can do nothing about count=0 pages.
  	 * 1) it's a free page, and therefore in safe hand:
  	 *    prep_new_page() will be the gate keeper.
  	 * 2) it's a free hugepage, which is also safe:
  	 *    an affected hugepage will be dequeued from hugepage freelist,
  	 *    so there's no concern about reusing it ever after.
  	 * 3) it's part of a non-compound high order page.
  	 *    Implies some kernel user: cannot stop them from
  	 *    R/W the page; let's pray that the page has been
  	 *    used and will be freed some time later.
  	 * In fact it's dangerous to directly bump up page count from 0,
  	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
  	 */
  	if (!(flags & MF_COUNT_INCREASED) &&
  		!get_page_unless_zero(hpage)) {
  		if (is_free_buddy_page(p)) {
  			action_result(pfn, "free buddy", DELAYED);
  			return 0;
  		} else if (PageHuge(hpage)) {
  			/*
  			 * Check "just unpoisoned", "filter hit", and
  			 * "race with other subpage."
  			 */
  			lock_page_nosync(hpage);
  			if (!PageHWPoison(hpage)
  			    || (hwpoison_filter(p) && TestClearPageHWPoison(p))
  			    || (p != hpage && TestSetPageHWPoison(hpage))) {
  				atomic_long_sub(nr_pages, &mce_bad_pages);
  				return 0;
  			}
  			set_page_hwpoison_huge_page(hpage);
  			res = dequeue_hwpoisoned_huge_page(hpage);
  			action_result(pfn, "free huge",
  				      res ? IGNORED : DELAYED);
  			unlock_page(hpage);
  			return res;
  		} else {
  			action_result(pfn, "high order kernel", IGNORED);
  			return -EBUSY;
  		}
  	}
  
  	/*
  	 * We ignore non-LRU pages for good reasons.
  	 * - PG_locked is only well defined for LRU pages and a few others
  	 * - to avoid races with __set_page_locked()
  	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
  	 * The check (unnecessarily) ignores LRU pages being isolated and
  	 * walked by the page reclaim code, however that's not a big loss.
  	 */
  	if (!PageLRU(p) && !PageHuge(p))
  		shake_page(p, 0);
  	if (!PageLRU(p) && !PageHuge(p)) {
  		/*
  		 * shake_page could have turned it free.
  		 */
  		if (is_free_buddy_page(p)) {
  			action_result(pfn, "free buddy, 2nd try", DELAYED);
  			return 0;
  		}
  		action_result(pfn, "non LRU", IGNORED);
  		put_page(p);
  		return -EBUSY;
  	}
  
  	/*
  	 * Lock the page and wait for writeback to finish.
  	 * It's very difficult to mess with pages currently under IO
  	 * and in many cases impossible, so we just avoid it here.
  	 */
  	lock_page_nosync(hpage);
  
  	/*
  	 * unpoison always clear PG_hwpoison inside page lock
  	 */
  	if (!PageHWPoison(p)) {
  		printk(KERN_ERR "MCE %#lx: just unpoisoned
  ", pfn);
  		res = 0;
  		goto out;
  	}
  	if (hwpoison_filter(p)) {
  		if (TestClearPageHWPoison(p))
  			atomic_long_sub(nr_pages, &mce_bad_pages);
  		unlock_page(hpage);
  		put_page(hpage);
  		return 0;
  	}

  	/*
  	 * For error on the tail page, we should set PG_hwpoison
  	 * on the head page to show that the hugepage is hwpoisoned
  	 */
  	if (PageTail(p) && TestSetPageHWPoison(hpage)) {
  		action_result(pfn, "hugepage already hardware poisoned",
  				IGNORED);
  		unlock_page(hpage);
  		put_page(hpage);
  		return 0;
  	}
  	/*
  	 * Set PG_hwpoison on all pages in an error hugepage,
  	 * because containment is done in hugepage unit for now.
  	 * Since we have done TestSetPageHWPoison() for the head page with
  	 * page lock held, we can safely set PG_hwpoison bits on tail pages.
  	 */
  	if (PageHuge(p))
  		set_page_hwpoison_huge_page(hpage);
  	wait_on_page_writeback(p);
  
  	/*
  	 * Now take care of user space mappings.
  	 * Abort on fail: __remove_from_page_cache() assumes unmapped page.
  	 */
  	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
  		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up
  ", pfn);
  		res = -EBUSY;
  		goto out;
  	}
  
  	/*
  	 * Torn down by someone else?
  	 */
  	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
  		action_result(pfn, "already truncated LRU", IGNORED);
  		res = -EBUSY;
  		goto out;
  	}
  
  	res = -EBUSY;
  	for (ps = error_states;; ps++) {
  		if ((p->flags & ps->mask) == ps->res) {
  			res = page_action(ps, p, pfn);
  			break;
  		}
  	}
  out:
  	unlock_page(hpage);
  	return res;
  }
  EXPORT_SYMBOL_GPL(__memory_failure);
  
  /**
   * memory_failure - Handle memory failure of a page.
   * @pfn: Page Number of the corrupted page
   * @trapno: Trap number reported in the signal to user space.
   *
   * This function is called by the low level machine check code
   * of an architecture when it detects hardware memory corruption
   * of a page. It tries its best to recover, which includes
   * dropping pages, killing processes etc.
   *
   * The function is primarily of use for corruptions that
   * happen outside the current execution context (e.g. when
   * detected by a background scrubber)
   *
   * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
   */
  void memory_failure(unsigned long pfn, int trapno)
  {
  	__memory_failure(pfn, trapno, 0);
  }
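
/*
 * A hypothetical arch-side caller, for illustration only (the real x86
 * machine check code is more involved and defers the call to process
 * context):
 *
 *	void arch_handle_uncorrected_error(u64 paddr, int trapno)
 *	{
 *		// paddr comes from the hardware error record; the pfn is
 *		// just the physical address shifted.
 *		memory_failure(paddr >> PAGE_SHIFT, trapno);
 *	}
 */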
  
  /**
   * unpoison_memory - Unpoison a previously poisoned page
   * @pfn: Page number of the to be unpoisoned page
   *
   * Software-unpoison a page that has been poisoned by
   * memory_failure() earlier.
   *
   * This is only done on the software-level, so it only works
   * for linux injected failures, not real hardware failures
   *
   * Returns 0 for success, otherwise -errno.
   */
  int unpoison_memory(unsigned long pfn)
  {
  	struct page *page;
  	struct page *p;
  	int freeit = 0;
  	unsigned int nr_pages;
  
  	if (!pfn_valid(pfn))
  		return -ENXIO;
  
  	p = pfn_to_page(pfn);
  	page = compound_head(p);
  
  	if (!PageHWPoison(p)) {
  		pr_info("MCE: Page was already unpoisoned %#lx
  ", pfn);
  		return 0;
  	}
  	nr_pages = 1 << compound_order(page);
  	if (!get_page_unless_zero(page)) {
  		/*
		 * Since a hwpoisoned hugepage should have a non-zero refcount,
		 * a race between memory failure and unpoison seems to have
		 * happened. In that case unpoison fails and memory failure
		 * runs to the end.
  		 */
  		if (PageHuge(page)) {
  			pr_debug("MCE: Memory failure is now running on free hugepage %#lx
  ", pfn);
  			return 0;
  		}
  		if (TestClearPageHWPoison(p))
  			atomic_long_sub(nr_pages, &mce_bad_pages);
  		pr_info("MCE: Software-unpoisoned free page %#lx
  ", pfn);
  		return 0;
  	}
  
  	lock_page_nosync(page);
  	/*
  	 * This test is racy because PG_hwpoison is set outside of page lock.
  	 * That's acceptable because that won't trigger kernel panic. Instead,
  	 * the PG_hwpoison page will be caught and isolated on the entrance to
  	 * the free buddy page pool.
  	 */
  	if (TestClearPageHWPoison(page)) {
  		pr_info("MCE: Software-unpoisoned page %#lx
  ", pfn);
  		atomic_long_sub(nr_pages, &mce_bad_pages);
  		freeit = 1;
  		if (PageHuge(page))
  			clear_page_hwpoison_huge_page(page);
  	}
  	unlock_page(page);
  
  	put_page(page);
  	if (freeit)
  		put_page(page);
  
  	return 0;
  }
  EXPORT_SYMBOL(unpoison_memory);
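
/*
 * Unpoisoning is purely a test facility; it is normally driven through the
 * hwpoison-inject debugfs file unpoison-pfn (mm/hwpoison-inject.c), which
 * calls unpoison_memory() on the written pfn.
 */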
  
  static struct page *new_page(struct page *p, unsigned long private, int **x)
  {
  	int nid = page_to_nid(p);
  	if (PageHuge(p))
  		return alloc_huge_page_node(page_hstate(compound_head(p)),
  						   nid);
  	else
  		return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
  }
  
  /*
   * Safely get reference count of an arbitrary page.
   * Returns 0 for a free page, -EIO for a zero refcount page
   * that is not free, and 1 for any other page type.
   * For 1 the page is returned with increased page count, otherwise not.
   */
  static int get_any_page(struct page *p, unsigned long pfn, int flags)
  {
  	int ret;
  
  	if (flags & MF_COUNT_INCREASED)
  		return 1;
  
  	/*
  	 * The lock_memory_hotplug prevents a race with memory hotplug.
	 * This is a big hammer; something more fine-grained would be nicer.
  	 */
  	lock_memory_hotplug();
  
  	/*
  	 * Isolate the page, so that it doesn't get reallocated if it
  	 * was free.
  	 */
  	set_migratetype_isolate(p);
  	/*
  	 * When the target page is a free hugepage, just remove it
  	 * from free hugepage list.
  	 */
  	if (!get_page_unless_zero(compound_head(p))) {
  		if (PageHuge(p)) {
  			pr_info("get_any_page: %#lx free huge page
  ", pfn);
  			ret = dequeue_hwpoisoned_huge_page(compound_head(p));
  		} else if (is_free_buddy_page(p)) {
  			pr_info("get_any_page: %#lx free buddy page
  ", pfn);
  			/* Set hwpoison bit while page is still isolated */
  			SetPageHWPoison(p);
  			ret = 0;
  		} else {
  			pr_info("get_any_page: %#lx: unknown zero refcount page type %lx
  ",
  				pfn, p->flags);
  			ret = -EIO;
  		}
  	} else {
  		/* Not a free page */
  		ret = 1;
  	}
  	unset_migratetype_isolate(p);
  	unlock_memory_hotplug();
  	return ret;
  }
  static int soft_offline_huge_page(struct page *page, int flags)
  {
  	int ret;
  	unsigned long pfn = page_to_pfn(page);
  	struct page *hpage = compound_head(page);
  	LIST_HEAD(pagelist);
  
  	ret = get_any_page(page, pfn, flags);
  	if (ret < 0)
  		return ret;
  	if (ret == 0)
  		goto done;
  
  	if (PageHWPoison(hpage)) {
  		put_page(hpage);
  		pr_debug("soft offline: %#lx hugepage already poisoned
  ", pfn);
  		return -EBUSY;
  	}
  
  	/* Keep page count to indicate a given hugepage is isolated. */
  
  	list_add(&hpage->lru, &pagelist);
  	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
  	if (ret) {
		putback_lru_pages(&pagelist);
  		pr_debug("soft offline: %#lx: migration failed %d, type %lx
  ",
  			 pfn, ret, page->flags);
  		if (ret > 0)
  			ret = -EIO;
  		return ret;
  	}
  done:
  	if (!PageHWPoison(hpage))
  		atomic_long_add(1 << compound_order(hpage), &mce_bad_pages);
  	set_page_hwpoison_huge_page(hpage);
  	dequeue_hwpoisoned_huge_page(hpage);
  	/* keep elevated page count for bad page */
  	return ret;
  }
  /**
   * soft_offline_page - Soft offline a page.
   * @page: page to offline
   * @flags: flags. Same as memory_failure().
   *
   * Returns 0 on success, otherwise negated errno.
   *
   * Soft offline a page, by migration or invalidation,
   * without killing anything. This is for the case when
   * a page is not corrupted yet (so it's still valid to access),
   * but has had a number of corrected errors and is better taken
   * out.
   *
   * The actual policy on when to do that is maintained by
   * user space.
   *
   * This should never impact any application or cause data loss,
   * however it might take some time.
   *
   * This is not a 100% solution for all memory, but tries to be
   * ``good enough'' for the majority of memory.
   */
  int soft_offline_page(struct page *page, int flags)
  {
  	int ret;
  	unsigned long pfn = page_to_pfn(page);
  	if (PageHuge(page))
  		return soft_offline_huge_page(page, flags);
  	ret = get_any_page(page, pfn, flags);
  	if (ret < 0)
  		return ret;
  	if (ret == 0)
  		goto done;
  
  	/*
  	 * Page cache page we can handle?
  	 */
  	if (!PageLRU(page)) {
  		/*
  		 * Try to free it.
  		 */
  		put_page(page);
  		shake_page(page, 1);
  
  		/*
  		 * Did it turn free?
  		 */
  		ret = get_any_page(page, pfn, 0);
  		if (ret < 0)
  			return ret;
  		if (ret == 0)
  			goto done;
  	}
  	if (!PageLRU(page)) {
  		pr_info("soft_offline: %#lx: unknown non LRU page type %lx
  ",
  				pfn, page->flags);
  		return -EIO;
  	}
  
  	lock_page(page);
  	wait_on_page_writeback(page);
  
  	/*
  	 * Synchronized using the page lock with memory_failure()
  	 */
  	if (PageHWPoison(page)) {
  		unlock_page(page);
  		put_page(page);
  		pr_info("soft offline: %#lx page already poisoned
  ", pfn);
  		return -EBUSY;
  	}
  
  	/*
  	 * Try to invalidate first. This should work for
  	 * non dirty unmapped page cache pages.
  	 */
  	ret = invalidate_inode_page(page);
  	unlock_page(page);
  
  	/*
  	 * Drop count because page migration doesn't like raised
  	 * counts. The page could get re-allocated, but if it becomes
  	 * LRU the isolation will just fail.
  	 * RED-PEN would be better to keep it isolated here, but we
  	 * would need to fix isolation locking first.
  	 */
  	put_page(page);
  	if (ret == 1) {
  		ret = 0;
  		pr_info("soft_offline: %#lx: invalidated
  ", pfn);
  		goto done;
  	}
  
  	/*
  	 * Simple invalidation didn't work.
  	 * Try to migrate to a new page instead. migrate.c
  	 * handles a large number of cases for us.
  	 */
  	ret = isolate_lru_page(page);
  	if (!ret) {
  		LIST_HEAD(pagelist);
  
  		list_add(&page->lru, &pagelist);
  		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
  		if (ret) {
  			pr_info("soft offline: %#lx: migration failed %d, type %lx
  ",
  				pfn, ret, page->flags);
  			if (ret > 0)
  				ret = -EIO;
  		}
  	} else {
  		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx
  ",
  				pfn, ret, page_count(page), page->flags);
  	}
  	if (ret)
  		return ret;
  
  done:
  	atomic_long_add(1, &mce_bad_pages);
  	SetPageHWPoison(page);
  	/* keep elevated page count for bad page */
  	return ret;
  }
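
/*
 * A minimal user space sketch for exercising this path, assuming a kernel
 * with hwpoison injection support so that madvise(MADV_SOFT_OFFLINE) is
 * wired through to soft_offline_page() (the caller needs CAP_SYS_ADMIN):
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static int soft_offline_my_page(void *page_aligned_addr)
 *	{
 *		// The address must be mapped and page aligned.
 *		return madvise(page_aligned_addr, getpagesize(),
 *			       MADV_SOFT_OFFLINE);
 *	}
 */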

  /*
   * The caller must hold current->mm->mmap_sem in read mode.
   */
  int is_hwpoison_address(unsigned long addr)
  {
  	pgd_t *pgdp;
  	pud_t pud, *pudp;
  	pmd_t pmd, *pmdp;
  	pte_t pte, *ptep;
  	swp_entry_t entry;
  
  	pgdp = pgd_offset(current->mm, addr);
  	if (!pgd_present(*pgdp))
  		return 0;
  	pudp = pud_offset(pgdp, addr);
  	pud = *pudp;
  	if (!pud_present(pud) || pud_large(pud))
  		return 0;
  	pmdp = pmd_offset(pudp, addr);
  	pmd = *pmdp;
  	if (!pmd_present(pmd) || pmd_large(pmd))
  		return 0;
  	ptep = pte_offset_map(pmdp, addr);
  	pte = *ptep;
  	pte_unmap(ptep);
  	if (!is_swap_pte(pte))
  		return 0;
  	entry = pte_to_swp_entry(pte);
  	return is_hwpoison_entry(entry);
  }
  EXPORT_SYMBOL_GPL(is_hwpoison_address);
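
/*
 * The intended user of this helper is KVM (see the commit that added it,
 * "KVM: Avoid killing userspace through guest SIGBUS"): before deciding
 * how to handle a failing guest access, KVM checks whether the host
 * virtual address has already been unmapped to a hwpoison swap entry.
 */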