Commit a776878d6cf8a81fa65b29aa9bd6a10a5131e71c

Authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, efi: Calling __pa() with an ioremap()ed address is invalid
  x86, hpet: Immediately disable HPET timer 1 if rtc irq is masked
  x86/intel_mid: Kconfig select fix
  x86/intel_mid: Fix the Kconfig for MID selection
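
A note on the first entry above: __pa() is only defined for addresses that
live in the kernel direct mapping (values obtained from __va(), kmalloc()
and friends), whereas ioremap() hands back a pointer into the vmalloc/ioremap
window, so feeding that to __pa() yields a bogus physical address. The sketch
below is illustrative only and not part of the merged diff; the function name
and parameters are invented.

/*
 * Illustrative sketch, not from this merge.  Assumes <linux/io.h> and
 * <asm/page.h>; "phys"/"size" stand in for an EFI region's bounds.
 */
static void __init pa_vs_ioremap_sketch(phys_addr_t phys, unsigned long size)
{
	void __iomem *io = ioremap_cache(phys, size);
	void *direct;

	if (!io)
		return;

	/*
	 * Wrong: "io" points into the vmalloc/ioremap area, not the direct
	 * mapping, so __pa(io) is meaningless; this is the bug class the
	 * EFI patch in this merge avoids.
	 */
	/* phys_addr_t bogus = __pa(io); */
	iounmap(io);

	/*
	 * Valid only when "phys" is covered by the direct mapping, which is
	 * what tagging EFI_RUNTIME_SERVICES_DATA as E820_RESERVED_EFI and
	 * extending init_memory_mapping() below guarantees.
	 */
	direct = __va(phys);
	(void)__pa(direct);
}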

Showing 8 changed files

arch/x86/Kconfig
... ... @@ -390,7 +390,7 @@
390 390 This option compiles in support for the CE4100 SOC for settop
391 391 boxes and media devices.
392 392  
393   -config X86_INTEL_MID
  393 +config X86_WANT_INTEL_MID
394 394 bool "Intel MID platform support"
395 395 depends on X86_32
396 396 depends on X86_EXTENDED_PLATFORM
... ... @@ -399,8 +399,11 @@
399 399 systems which do not have the PCI legacy interfaces (Moorestown,
400 400 Medfield). If you are building for a PC class system say N here.
401 401  
402   -if X86_INTEL_MID
  402 +if X86_WANT_INTEL_MID
403 403  
  404 +config X86_INTEL_MID
  405 + bool
  406 +
404 407 config X86_MRST
405 408 bool "Moorestown MID platform"
406 409 depends on PCI
... ... @@ -411,6 +414,7 @@
411 414 select SPI
412 415 select INTEL_SCU_IPC
413 416 select X86_PLATFORM_DEVICES
  417 + select X86_INTEL_MID
414 418 ---help---
415 419 Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
416 420 Internet Device(MID) platform. Moorestown consists of two chips:
arch/x86/include/asm/e820.h
... ... @@ -53,6 +53,13 @@
53 53 */
54 54 #define E820_RESERVED_KERN 128
55 55  
  56 +/*
  57 + * Address ranges that need to be mapped by the kernel direct
  58 + * mapping. This is used to make sure regions such as
  59 + * EFI_RUNTIME_SERVICES_DATA are directly mapped. See setup_arch().
  60 + */
  61 +#define E820_RESERVED_EFI 129
  62 +
56 63 #ifndef __ASSEMBLY__
57 64 #include <linux/types.h>
58 65 struct e820entry {
... ... @@ -115,6 +122,7 @@
115 122 }
116 123 #endif
117 124  
  125 +extern unsigned long e820_end_pfn(unsigned long limit_pfn, unsigned type);
118 126 extern unsigned long e820_end_of_ram_pfn(void);
119 127 extern unsigned long e820_end_of_low_ram_pfn(void);
120 128 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
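
For context: E820_RESERVED_EFI sits next to E820_RESERVED_KERN in the range of
types the kernel assigns itself; it is set from the EFI memory map code further
down rather than reported by firmware, and setup_arch() consumes it through the
newly exported e820_end_pfn(). A minimal sketch of a hypothetical helper (not
in this merge) that walks the e820 table for the new type:

/* Assumes <asm/e820.h>; "e820" is the kernel's global struct e820map. */
static u64 __init e820_reserved_efi_bytes(void)
{
	u64 bytes = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820.map[i].type == E820_RESERVED_EFI)
			bytes += e820.map[i].size;

	return bytes;
}
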
arch/x86/include/asm/efi.h
... ... @@ -33,8 +33,6 @@
33 33 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
34 34 efi_call_virt(f, a1, a2, a3, a4, a5, a6)
35 35  
36   -#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
37   -
38 36 #else /* !CONFIG_X86_32 */
39 37  
40 38 extern u64 efi_call0(void *fp);
... ... @@ -83,9 +81,6 @@
83 81 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
84 82 efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
85 83 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
86   -
87   -extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
88   - u32 type);
89 84  
90 85 #endif /* CONFIG_X86_32 */
91 86  
arch/x86/kernel/e820.c
... ... @@ -135,6 +135,7 @@
135 135 printk(KERN_CONT "(usable)");
136 136 break;
137 137 case E820_RESERVED:
  138 + case E820_RESERVED_EFI:
138 139 printk(KERN_CONT "(reserved)");
139 140 break;
140 141 case E820_ACPI:
... ... @@ -783,7 +784,7 @@
783 784 /*
784 785 * Find the highest page frame number we have available
785 786 */
786   -static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
  787 +unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
787 788 {
788 789 int i;
789 790 unsigned long last_pfn = 0;
arch/x86/kernel/hpet.c
... ... @@ -1049,6 +1049,14 @@
1049 1049 }
1050 1050 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
1051 1051  
  1052 +static void hpet_disable_rtc_channel(void)
  1053 +{
  1054 + unsigned long cfg;
  1055 + cfg = hpet_readl(HPET_T1_CFG);
  1056 + cfg &= ~HPET_TN_ENABLE;
  1057 + hpet_writel(cfg, HPET_T1_CFG);
  1058 +}
  1059 +
1052 1060 /*
1053 1061 * The functions below are called from rtc driver.
1054 1062 * Return 0 if HPET is not being used.
... ... @@ -1060,6 +1068,9 @@
1060 1068 return 0;
1061 1069  
1062 1070 hpet_rtc_flags &= ~bit_mask;
  1071 + if (unlikely(!hpet_rtc_flags))
  1072 + hpet_disable_rtc_channel();
  1073 +
1063 1074 return 1;
1064 1075 }
1065 1076 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
1066 1077  
... ... @@ -1125,15 +1136,11 @@
1125 1136  
1126 1137 static void hpet_rtc_timer_reinit(void)
1127 1138 {
1128   - unsigned int cfg, delta;
  1139 + unsigned int delta;
1129 1140 int lost_ints = -1;
1130 1141  
1131   - if (unlikely(!hpet_rtc_flags)) {
1132   - cfg = hpet_readl(HPET_T1_CFG);
1133   - cfg &= ~HPET_TN_ENABLE;
1134   - hpet_writel(cfg, HPET_T1_CFG);
1135   - return;
1136   - }
  1142 + if (unlikely(!hpet_rtc_flags))
  1143 + hpet_disable_rtc_channel();
1137 1144  
1138 1145 if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
1139 1146 delta = hpet_default_delta;
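
The HPET change factors the timer 1 shutdown into hpet_disable_rtc_channel()
and, more importantly, calls it from hpet_mask_rtc_irq_bit() the moment the
last RTC flag is cleared, instead of waiting for the next timer interrupt to
reach hpet_rtc_timer_reinit(). A sketch of the caller side; the wrapper below
is invented for illustration, though the functions it calls are the real ones
used by drivers/char/rtc.c:

#include <linux/mc146818rtc.h>
#include <asm/hpet.h>

/* Masking the last bit drives hpet_rtc_flags to zero, which now disables
 * HPET timer 1 immediately rather than on the next (pointless) interrupt. */
static void rtc_stop_periodic_irq(void)
{
	if (is_hpet_enabled())
		hpet_mask_rtc_irq_bit(RTC_PIE);
}
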
arch/x86/kernel/setup.c
... ... @@ -691,6 +691,8 @@
691 691  
692 692 void __init setup_arch(char **cmdline_p)
693 693 {
  694 + unsigned long end_pfn;
  695 +
694 696 #ifdef CONFIG_X86_32
695 697 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
696 698 visws_early_detect();
... ... @@ -932,7 +934,24 @@
932 934 init_gbpages();
933 935  
934 936 /* max_pfn_mapped is updated here */
935   - max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
  937 + end_pfn = max_low_pfn;
  938 +
  939 +#ifdef CONFIG_X86_64
  940 + /*
  941 + * There may be regions after the last E820_RAM region that we
  942 + * want to include in the kernel direct mapping, such as
  943 + * EFI_RUNTIME_SERVICES_DATA.
  944 + */
  945 + if (efi_enabled) {
  946 + unsigned long efi_end;
  947 +
  948 + efi_end = e820_end_pfn(MAXMEM>>PAGE_SHIFT, E820_RESERVED_EFI);
  949 + if (efi_end > max_low_pfn)
  950 + end_pfn = efi_end;
  951 + }
  952 +#endif
  953 +
  954 + max_low_pfn_mapped = init_memory_mapping(0, end_pfn << PAGE_SHIFT);
936 955 max_pfn_mapped = max_low_pfn_mapped;
937 956  
938 957 #ifdef CONFIG_X86_64
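
With end_pfn possibly raised beyond max_low_pfn here, max_low_pfn_mapped and
max_pfn_mapped now cover the EFI runtime-data ranges as well, which is what
lets efi.c below use __va() on them. A hypothetical predicate (not in the
tree) mirroring the test efi_enter_virtual_mode() performs:

/* Assumes <linux/pfn.h> and <asm/page_types.h>. */
static bool __init range_is_direct_mapped(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	return end_pfn <= max_low_pfn_mapped ||
	       (end_pfn > (1UL << (32 - PAGE_SHIFT)) &&
		end_pfn <= max_pfn_mapped);
}
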
arch/x86/platform/efi/efi.c
... ... @@ -323,10 +323,13 @@
323 323 case EFI_UNUSABLE_MEMORY:
324 324 e820_type = E820_UNUSABLE;
325 325 break;
  326 + case EFI_RUNTIME_SERVICES_DATA:
  327 + e820_type = E820_RESERVED_EFI;
  328 + break;
326 329 default:
327 330 /*
328 331 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
329   - * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
  332 + * EFI_MEMORY_MAPPED_IO
330 333 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
331 334 */
332 335 e820_type = E820_RESERVED;
... ... @@ -671,24 +674,28 @@
671 674 end_pfn = PFN_UP(end);
672 675 if (end_pfn <= max_low_pfn_mapped
673 676 || (end_pfn > (1UL << (32 - PAGE_SHIFT))
674   - && end_pfn <= max_pfn_mapped))
  677 + && end_pfn <= max_pfn_mapped)) {
675 678 va = __va(md->phys_addr);
676   - else
677   - va = efi_ioremap(md->phys_addr, size, md->type);
678 679  
  680 + if (!(md->attribute & EFI_MEMORY_WB)) {
  681 + addr = (u64) (unsigned long)va;
  682 + npages = md->num_pages;
  683 + memrange_efi_to_native(&addr, &npages);
  684 + set_memory_uc(addr, npages);
  685 + }
  686 + } else {
  687 + if (!(md->attribute & EFI_MEMORY_WB))
  688 + va = ioremap_nocache(md->phys_addr, size);
  689 + else
  690 + va = ioremap_cache(md->phys_addr, size);
  691 + }
  692 +
679 693 md->virt_addr = (u64) (unsigned long) va;
680 694  
681 695 if (!va) {
682 696 printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
683 697 (unsigned long long)md->phys_addr);
684 698 continue;
685   - }
686   -
687   - if (!(md->attribute & EFI_MEMORY_WB)) {
688   - addr = md->virt_addr;
689   - npages = md->num_pages;
690   - memrange_efi_to_native(&addr, &npages);
691   - set_memory_uc(addr, npages);
692 699 }
693 700  
694 701 systab = (u64) (unsigned long) efi_phys.systab;
arch/x86/platform/efi/efi_64.c
... ... @@ -80,21 +80,4 @@
80 80 local_irq_restore(efi_flags);
81 81 early_code_mapping_set_exec(0);
82 82 }
83   -
84   -void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
85   - u32 type)
86   -{
87   - unsigned long last_map_pfn;
88   -
89   - if (type == EFI_MEMORY_MAPPED_IO)
90   - return ioremap(phys_addr, size);
91   -
92   - last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
93   - if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
94   - unsigned long top = last_map_pfn << PAGE_SHIFT;
95   - efi_ioremap(top, size - (top - phys_addr), type);
96   - }
97   -
98   - return (void __iomem *)__va(phys_addr);
99   -}
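
efi_ioremap() can go away because a runtime region is now either covered by
the direct mapping set up in setup_arch() (and handled with __va() plus
set_memory_uc() for non-WB attributes) or remapped with the stock ioremap
helpers in efi.c above. A condensed sketch of that fallback path; the helper
name is invented, the logic mirrors the efi.c hunk:

/* Assumes <linux/efi.h> and <linux/io.h>. */
static void __iomem *efi_remap_region(efi_memory_desc_t *md, u64 size)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return ioremap_nocache(md->phys_addr, size);

	return ioremap_cache(md->phys_addr, size);
}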