Commit 81e88fdc432a1552401d6e91a984dcccce72b8dc

Authored by Huang Ying
Committed by Len Brown
1 parent 32c361f574

ACPI, APEI, Generic Hardware Error Source POLL/IRQ/NMI notification type support

Generic Hardware Error Source provides a way to report platform
hardware errors (such as those from the chipset). It works in the
so-called "Firmware First" mode: hardware errors are reported to the
firmware first, and then reported to Linux by the firmware. This way,
some non-standard hardware error registers or non-standard hardware
links can be checked by the firmware to produce more valuable hardware
error information for Linux.

This patch adds POLL/IRQ/NMI notification types support.

Because the memory area used to transfer hardware error information
from the BIOS to Linux can be determined only in the NMI, IRQ, or
timer handler, and the general ioremap cannot be used in atomic
context, a special atomic version of ioremap is implemented for that
purpose.

Known issue:

- Error information cannot be printed for recoverable errors notified
  via NMI, because printk is not NMI-safe. This will be fixed by
  deferring printing to IRQ context via irq_work, or by making printk
  NMI-safe.

v2:

- adjust printk format per comments.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>

Showing 6 changed files with 328 additions and 84 deletions Side-by-side Diff

arch/x86/kernel/acpi/boot.c
... ... @@ -504,6 +504,7 @@
504 504  
505 505 return 0;
506 506 }
  507 +EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
507 508  
508 509 int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
509 510 {
arch/x86/kernel/dumpstack.c
... ... @@ -240,6 +240,7 @@
240 240 bust_spinlocks(1);
241 241 return flags;
242 242 }
  243 +EXPORT_SYMBOL_GPL(oops_begin);
243 244  
244 245 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
245 246 {
drivers/acpi/apei/ghes.c
... ... @@ -12,10 +12,6 @@
12 12 * For more information about Generic Hardware Error Source, please
13 13 * refer to ACPI Specification version 4.0, section 17.3.2.6
14 14 *
15   - * Now, only SCI notification type and memory errors are
16   - * supported. More notification type and hardware error type will be
17   - * added later.
18   - *
19 15 * Copyright 2010 Intel Corp.
20 16 * Author: Huang Ying <ying.huang@intel.com>
21 17 *
22 18  
23 19  
... ... @@ -39,15 +35,18 @@
39 35 #include <linux/acpi.h>
40 36 #include <linux/io.h>
41 37 #include <linux/interrupt.h>
  38 +#include <linux/timer.h>
42 39 #include <linux/cper.h>
43 40 #include <linux/kdebug.h>
44 41 #include <linux/platform_device.h>
45 42 #include <linux/mutex.h>
46 43 #include <linux/ratelimit.h>
  44 +#include <linux/vmalloc.h>
47 45 #include <acpi/apei.h>
48 46 #include <acpi/atomicio.h>
49 47 #include <acpi/hed.h>
50 48 #include <asm/mce.h>
  49 +#include <asm/tlbflush.h>
51 50  
52 51 #include "apei-internal.h"
53 52  
54 53  
55 54  
56 55  
57 56  
58 57  
59 58  
60 59  
61 60  
62 61  
... ... @@ -56,42 +55,131 @@
56 55 #define GHES_ESTATUS_MAX_SIZE 65536
57 56  
58 57 /*
59   - * One struct ghes is created for each generic hardware error
60   - * source.
61   - *
  58 + * One struct ghes is created for each generic hardware error source.
62 59 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
63   - * handler. Handler for one generic hardware error source is only
64   - * triggered after the previous one is done. So handler can uses
65   - * struct ghes without locking.
  60 + * handler.
66 61 *
67 62 * estatus: memory buffer for error status block, allocated during
68 63 * HEST parsing.
69 64 */
70 65 #define GHES_TO_CLEAR 0x0001
  66 +#define GHES_EXITING 0x0002
71 67  
72 68 struct ghes {
73 69 struct acpi_hest_generic *generic;
74 70 struct acpi_hest_generic_status *estatus;
75   - struct list_head list;
76 71 u64 buffer_paddr;
77 72 unsigned long flags;
  73 + union {
  74 + struct list_head list;
  75 + struct timer_list timer;
  76 + unsigned int irq;
  77 + };
78 78 };
79 79  
  80 +static int ghes_panic_timeout __read_mostly = 30;
  81 +
80 82 /*
81   - * Error source lists, one list for each notification method. The
82   - * members in lists are struct ghes.
  83 + * All error sources notified with SCI share one notifier function,
  84 + * so they need to be linked and checked one by one. The same
  85 + * applies to NMI.
83 86 *
84   - * The list members are only added in HEST parsing and deleted during
85   - * module_exit, that is, single-threaded. So no lock is needed for
86   - * that.
87   - *
88   - * But the mutual exclusion is needed between members adding/deleting
89   - * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is
90   - * used for that.
  87 + * RCU is used for these lists, so ghes_list_mutex is only used for
  88 + * list changing, not for traversing.
91 89 */
92 90 static LIST_HEAD(ghes_sci);
  91 +static LIST_HEAD(ghes_nmi);
93 92 static DEFINE_MUTEX(ghes_list_mutex);
94 93  
  94 +/*
  95 + * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
  96 + * mutual exclusion.
  97 + */
  98 +static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
  99 +
  100 +/*
  101 + * Because the memory area used to transfer hardware error information
  102 + * from BIOS to Linux can be determined only in NMI, IRQ or timer
  103 + * handler, but general ioremap can not be used in atomic context, so
  104 + * a special version of atomic ioremap is implemented for that.
  105 + */
  106 +
  107 +/*
  108 + * Two virtual pages are used, one for NMI context, the other for
  109 + * IRQ/PROCESS context
  110 + */
  111 +#define GHES_IOREMAP_PAGES 2
  112 +#define GHES_IOREMAP_NMI_PAGE(base) (base)
  113 +#define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE)
  114 +
  115 +/* virtual memory area for atomic ioremap */
  116 +static struct vm_struct *ghes_ioremap_area;
  117 +/*
  118 + * These two spinlocks are used to prevent the atomic ioremap virtual
  119 + * memory area from being mapped simultaneously.
  120 + */
  121 +static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
  122 +static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
  123 +
  124 +static int ghes_ioremap_init(void)
  125 +{
  126 + ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
  127 + VM_IOREMAP, VMALLOC_START, VMALLOC_END);
  128 + if (!ghes_ioremap_area) {
  129 + pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
  130 + return -ENOMEM;
  131 + }
  132 +
  133 + return 0;
  134 +}
  135 +
  136 +static void ghes_ioremap_exit(void)
  137 +{
  138 + free_vm_area(ghes_ioremap_area);
  139 +}
  140 +
  141 +static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
  142 +{
  143 + unsigned long vaddr;
  144 +
  145 + vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
  146 + ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
  147 + pfn << PAGE_SHIFT, PAGE_KERNEL);
  148 +
  149 + return (void __iomem *)vaddr;
  150 +}
  151 +
  152 +static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
  153 +{
  154 + unsigned long vaddr;
  155 +
  156 + vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
  157 + ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
  158 + pfn << PAGE_SHIFT, PAGE_KERNEL);
  159 +
  160 + return (void __iomem *)vaddr;
  161 +}
  162 +
  163 +static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
  164 +{
  165 + unsigned long vaddr = (unsigned long __force)vaddr_ptr;
  166 + void *base = ghes_ioremap_area->addr;
  167 +
  168 + BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
  169 + unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
  170 + __flush_tlb_one(vaddr);
  171 +}
  172 +
  173 +static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
  174 +{
  175 + unsigned long vaddr = (unsigned long __force)vaddr_ptr;
  176 + void *base = ghes_ioremap_area->addr;
  177 +
  178 + BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
  179 + unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
  180 + __flush_tlb_one(vaddr);
  181 +}
  182 +
95 183 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
96 184 {
97 185 struct ghes *ghes;
... ... @@ -102,7 +190,6 @@
102 190 if (!ghes)
103 191 return ERR_PTR(-ENOMEM);
104 192 ghes->generic = generic;
105   - INIT_LIST_HEAD(&ghes->list);
106 193 rc = acpi_pre_map_gar(&generic->error_status_address);
107 194 if (rc)
108 195 goto err_free;
109 196  
110 197  
... ... @@ -159,22 +246,41 @@
159 246 }
160 247 }
161 248  
162   -/* SCI handler run in work queue, so ioremap can be used here */
163   -static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
164   - int from_phys)
  249 +static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
  250 + int from_phys)
165 251 {
166   - void *vaddr;
  252 + void __iomem *vaddr;
  253 + unsigned long flags = 0;
  254 + int in_nmi = in_nmi();
  255 + u64 offset;
  256 + u32 trunk;
167 257  
168   - vaddr = ioremap_cache(paddr, len);
169   - if (!vaddr)
170   - return -ENOMEM;
171   - if (from_phys)
172   - memcpy(buffer, vaddr, len);
173   - else
174   - memcpy(vaddr, buffer, len);
175   - iounmap(vaddr);
176   -
177   - return 0;
  258 + while (len > 0) {
  259 + offset = paddr - (paddr & PAGE_MASK);
  260 + if (in_nmi) {
  261 + raw_spin_lock(&ghes_ioremap_lock_nmi);
  262 + vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
  263 + } else {
  264 + spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
  265 + vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
  266 + }
  267 + trunk = PAGE_SIZE - offset;
  268 + trunk = min(trunk, len);
  269 + if (from_phys)
  270 + memcpy_fromio(buffer, vaddr + offset, trunk);
  271 + else
  272 + memcpy_toio(vaddr + offset, buffer, trunk);
  273 + len -= trunk;
  274 + paddr += trunk;
  275 + buffer += trunk;
  276 + if (in_nmi) {
  277 + ghes_iounmap_nmi(vaddr);
  278 + raw_spin_unlock(&ghes_ioremap_lock_nmi);
  279 + } else {
  280 + ghes_iounmap_irq(vaddr);
  281 + spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
  282 + }
  283 + }
178 284 }
179 285  
180 286 static int ghes_read_estatus(struct ghes *ghes, int silent)
... ... @@ -195,10 +301,8 @@
195 301 if (!buf_paddr)
196 302 return -ENOENT;
197 303  
198   - rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
199   - sizeof(*ghes->estatus), 1);
200   - if (rc)
201   - return rc;
  304 + ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
  305 + sizeof(*ghes->estatus), 1);
202 306 if (!ghes->estatus->block_status)
203 307 return -ENOENT;
204 308  
205 309  
... ... @@ -213,17 +317,15 @@
213 317 goto err_read_block;
214 318 if (apei_estatus_check_header(ghes->estatus))
215 319 goto err_read_block;
216   - rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
217   - buf_paddr + sizeof(*ghes->estatus),
218   - len - sizeof(*ghes->estatus), 1);
219   - if (rc)
220   - return rc;
  320 + ghes_copy_tofrom_phys(ghes->estatus + 1,
  321 + buf_paddr + sizeof(*ghes->estatus),
  322 + len - sizeof(*ghes->estatus), 1);
221 323 if (apei_estatus_check(ghes->estatus))
222 324 goto err_read_block;
223 325 rc = 0;
224 326  
225 327 err_read_block:
226   - if (rc && !silent)
  328 + if (rc && !silent && printk_ratelimit())
227 329 pr_warning(FW_WARN GHES_PFX
228 330 "Failed to read error status block!\n");
229 331 return rc;
... ... @@ -293,6 +395,42 @@
293 395 return 0;
294 396 }
295 397  
  398 +static void ghes_add_timer(struct ghes *ghes)
  399 +{
  400 + struct acpi_hest_generic *g = ghes->generic;
  401 + unsigned long expire;
  402 +
  403 + if (!g->notify.poll_interval) {
  404 + pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
  405 + g->header.source_id);
  406 + return;
  407 + }
  408 + expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
  409 + ghes->timer.expires = round_jiffies_relative(expire);
  410 + add_timer(&ghes->timer);
  411 +}
  412 +
  413 +static void ghes_poll_func(unsigned long data)
  414 +{
  415 + struct ghes *ghes = (void *)data;
  416 +
  417 + ghes_proc(ghes);
  418 + if (!(ghes->flags & GHES_EXITING))
  419 + ghes_add_timer(ghes);
  420 +}
  421 +
  422 +static irqreturn_t ghes_irq_func(int irq, void *data)
  423 +{
  424 + struct ghes *ghes = data;
  425 + int rc;
  426 +
  427 + rc = ghes_proc(ghes);
  428 + if (rc)
  429 + return IRQ_NONE;
  430 +
  431 + return IRQ_HANDLED;
  432 +}
  433 +
296 434 static int ghes_notify_sci(struct notifier_block *this,
297 435 unsigned long event, void *data)
298 436 {
299 437  
... ... @@ -309,10 +447,63 @@
309 447 return ret;
310 448 }
311 449  
  450 +static int ghes_notify_nmi(struct notifier_block *this,
  451 + unsigned long cmd, void *data)
  452 +{
  453 + struct ghes *ghes, *ghes_global = NULL;
  454 + int sev, sev_global = -1;
  455 + int ret = NOTIFY_DONE;
  456 +
  457 + if (cmd != DIE_NMI)
  458 + return ret;
  459 +
  460 + raw_spin_lock(&ghes_nmi_lock);
  461 + list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
  462 + if (ghes_read_estatus(ghes, 1)) {
  463 + ghes_clear_estatus(ghes);
  464 + continue;
  465 + }
  466 + sev = ghes_severity(ghes->estatus->error_severity);
  467 + if (sev > sev_global) {
  468 + sev_global = sev;
  469 + ghes_global = ghes;
  470 + }
  471 + ret = NOTIFY_STOP;
  472 + }
  473 +
  474 + if (ret == NOTIFY_DONE)
  475 + goto out;
  476 +
  477 + if (sev_global >= GHES_SEV_PANIC) {
  478 + oops_begin();
  479 + ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
  480 + /* reboot to log the error! */
  481 + if (panic_timeout == 0)
  482 + panic_timeout = ghes_panic_timeout;
  483 + panic("Fatal hardware error!");
  484 + }
  485 +
  486 + list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
  487 + if (!(ghes->flags & GHES_TO_CLEAR))
  488 + continue;
  489 + /* Do not print estatus because printk is not NMI safe */
  490 + ghes_do_proc(ghes);
  491 + ghes_clear_estatus(ghes);
  492 + }
  493 +
  494 +out:
  495 + raw_spin_unlock(&ghes_nmi_lock);
  496 + return ret;
  497 +}
  498 +
312 499 static struct notifier_block ghes_notifier_sci = {
313 500 .notifier_call = ghes_notify_sci,
314 501 };
315 502  
  503 +static struct notifier_block ghes_notifier_nmi = {
  504 + .notifier_call = ghes_notify_nmi,
  505 +};
  506 +
316 507 static int __devinit ghes_probe(struct platform_device *ghes_dev)
317 508 {
318 509 struct acpi_hest_generic *generic;
319 510  
320 511  
321 512  
322 513  
... ... @@ -323,59 +514,73 @@
323 514 if (!generic->enabled)
324 515 return -ENODEV;
325 516  
  517 + switch (generic->notify.type) {
  518 + case ACPI_HEST_NOTIFY_POLLED:
  519 + case ACPI_HEST_NOTIFY_EXTERNAL:
  520 + case ACPI_HEST_NOTIFY_SCI:
  521 + case ACPI_HEST_NOTIFY_NMI:
  522 + break;
  523 + case ACPI_HEST_NOTIFY_LOCAL:
  524 + pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
  525 + generic->header.source_id);
  526 + goto err;
  527 + default:
  528 + pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
  529 + generic->notify.type, generic->header.source_id);
  530 + goto err;
  531 + }
  532 +
  533 + rc = -EIO;
326 534 if (generic->error_block_length <
327 535 sizeof(struct acpi_hest_generic_status)) {
328   - pr_warning(FW_BUG GHES_PFX
329   -"Invalid error block length: %u for generic hardware error source: %d\n",
  536 + pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
330 537 generic->error_block_length,
331 538 generic->header.source_id);
332 539 goto err;
333 540 }
334   - if (generic->records_to_preallocate == 0) {
335   - pr_warning(FW_BUG GHES_PFX
336   -"Invalid records to preallocate: %u for generic hardware error source: %d\n",
337   - generic->records_to_preallocate,
338   - generic->header.source_id);
339   - goto err;
340   - }
341 541 ghes = ghes_new(generic);
342 542 if (IS_ERR(ghes)) {
343 543 rc = PTR_ERR(ghes);
344 544 ghes = NULL;
345 545 goto err;
346 546 }
347   - if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) {
  547 + switch (generic->notify.type) {
  548 + case ACPI_HEST_NOTIFY_POLLED:
  549 + ghes->timer.function = ghes_poll_func;
  550 + ghes->timer.data = (unsigned long)ghes;
  551 + init_timer_deferrable(&ghes->timer);
  552 + ghes_add_timer(ghes);
  553 + break;
  554 + case ACPI_HEST_NOTIFY_EXTERNAL:
  555 + /* External interrupt vector is GSI */
  556 + if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
  557 + pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
  558 + generic->header.source_id);
  559 + goto err;
  560 + }
  561 + if (request_irq(ghes->irq, ghes_irq_func,
  562 + 0, "GHES IRQ", ghes)) {
  563 + pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
  564 + generic->header.source_id);
  565 + goto err;
  566 + }
  567 + break;
  568 + case ACPI_HEST_NOTIFY_SCI:
348 569 mutex_lock(&ghes_list_mutex);
349 570 if (list_empty(&ghes_sci))
350 571 register_acpi_hed_notifier(&ghes_notifier_sci);
351 572 list_add_rcu(&ghes->list, &ghes_sci);
352 573 mutex_unlock(&ghes_list_mutex);
353   - } else {
354   - unsigned char *notify = NULL;
355   -
356   - switch (generic->notify.type) {
357   - case ACPI_HEST_NOTIFY_POLLED:
358   - notify = "POLL";
359   - break;
360   - case ACPI_HEST_NOTIFY_EXTERNAL:
361   - case ACPI_HEST_NOTIFY_LOCAL:
362   - notify = "IRQ";
363   - break;
364   - case ACPI_HEST_NOTIFY_NMI:
365   - notify = "NMI";
366   - break;
367   - }
368   - if (notify) {
369   - pr_warning(GHES_PFX
370   -"Generic hardware error source: %d notified via %s is not supported!\n",
371   - generic->header.source_id, notify);
372   - } else {
373   - pr_warning(FW_WARN GHES_PFX
374   -"Unknown notification type: %u for generic hardware error source: %d\n",
375   - generic->notify.type, generic->header.source_id);
376   - }
377   - rc = -ENODEV;
378   - goto err;
  574 + break;
  575 + case ACPI_HEST_NOTIFY_NMI:
  576 + mutex_lock(&ghes_list_mutex);
  577 + if (list_empty(&ghes_nmi))
  578 + register_die_notifier(&ghes_notifier_nmi);
  579 + list_add_rcu(&ghes->list, &ghes_nmi);
  580 + mutex_unlock(&ghes_list_mutex);
  581 + break;
  582 + default:
  583 + BUG();
379 584 }
380 585 platform_set_drvdata(ghes_dev, ghes);
381 586  
382 587  
... ... @@ -396,7 +601,14 @@
396 601 ghes = platform_get_drvdata(ghes_dev);
397 602 generic = ghes->generic;
398 603  
  604 + ghes->flags |= GHES_EXITING;
399 605 switch (generic->notify.type) {
  606 + case ACPI_HEST_NOTIFY_POLLED:
  607 + del_timer_sync(&ghes->timer);
  608 + break;
  609 + case ACPI_HEST_NOTIFY_EXTERNAL:
  610 + free_irq(ghes->irq, ghes);
  611 + break;
400 612 case ACPI_HEST_NOTIFY_SCI:
401 613 mutex_lock(&ghes_list_mutex);
402 614 list_del_rcu(&ghes->list);
403 615  
... ... @@ -404,12 +616,23 @@
404 616 unregister_acpi_hed_notifier(&ghes_notifier_sci);
405 617 mutex_unlock(&ghes_list_mutex);
406 618 break;
  619 + case ACPI_HEST_NOTIFY_NMI:
  620 + mutex_lock(&ghes_list_mutex);
  621 + list_del_rcu(&ghes->list);
  622 + if (list_empty(&ghes_nmi))
  623 + unregister_die_notifier(&ghes_notifier_nmi);
  624 + mutex_unlock(&ghes_list_mutex);
  625 + /*
  626 + * To synchronize with NMI handler, ghes can only be
  627 + * freed after NMI handler finishes.
  628 + */
  629 + synchronize_rcu();
  630 + break;
407 631 default:
408 632 BUG();
409 633 break;
410 634 }
411 635  
412   - synchronize_rcu();
413 636 ghes_fini(ghes);
414 637 kfree(ghes);
415 638  
... ... @@ -429,6 +652,8 @@
429 652  
430 653 static int __init ghes_init(void)
431 654 {
  655 + int rc;
  656 +
432 657 if (acpi_disabled)
433 658 return -ENODEV;
434 659  
435 660  
... ... @@ -437,12 +662,25 @@
437 662 return -EINVAL;
438 663 }
439 664  
440   - return platform_driver_register(&ghes_platform_driver);
  665 + rc = ghes_ioremap_init();
  666 + if (rc)
  667 + goto err;
  668 +
  669 + rc = platform_driver_register(&ghes_platform_driver);
  670 + if (rc)
  671 + goto err_ioremap_exit;
  672 +
  673 + return 0;
  674 +err_ioremap_exit:
  675 + ghes_ioremap_exit();
  676 +err:
  677 + return rc;
441 678 }
442 679  
443 680 static void __exit ghes_exit(void)
444 681 {
445 682 platform_driver_unregister(&ghes_platform_driver);
  683 + ghes_ioremap_exit();
446 684 }
447 685  
448 686 module_init(ghes_init);
... ... @@ -34,6 +34,7 @@
34 34 static DEFINE_SPINLOCK(pause_on_oops_lock);
35 35  
36 36 int panic_timeout;
  37 +EXPORT_SYMBOL_GPL(panic_timeout);
37 38  
38 39 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
39 40  
... ... @@ -9,6 +9,7 @@
9 9 #include <linux/mm.h>
10 10 #include <linux/sched.h>
11 11 #include <linux/io.h>
  12 +#include <linux/module.h>
12 13 #include <asm/cacheflush.h>
13 14 #include <asm/pgtable.h>
14 15  
... ... @@ -90,4 +91,5 @@
90 91  
91 92 return err;
92 93 }
  94 +EXPORT_SYMBOL_GPL(ioremap_page_range);
... ... @@ -1175,6 +1175,7 @@
1175 1175 {
1176 1176 vunmap_page_range(addr, addr + size);
1177 1177 }
  1178 +EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1178 1179  
1179 1180 /**
1180 1181 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB