Commit 869c34f5208adde010065c387354f2ebe4ec3bfc
Exists in master and in 7 other branches
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: ce4100: Set pci ops via callback instead of module init
  x86/mm: Fix pgd_lock deadlock
  x86/mm: Handle mm_fault_error() in kernel space
  x86: Don't check for BIOS corruption in first 64K when there's no need to
Showing 9 changed files
arch/x86/include/asm/ce4100.h
... | ... | @@ -0,0 +1,6 @@ |
1 | +#ifndef _ASM_CE4100_H_ |
2 | +#define _ASM_CE4100_H_ |
3 | + |
4 | +int ce4100_pci_init(void); |
5 | + |
6 | +#endif |
arch/x86/kernel/check.c
... | ... | @@ -106,8 +106,8 @@ |
106 | 106 | addr += size; |
107 | 107 | } |
108 | 108 | |
109 | - printk(KERN_INFO "Scanning %d areas for low memory corruption\n", | |
110 | - num_scan_areas); | |
109 | + if (num_scan_areas) | |
110 | + printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas); | |
111 | 111 | } |
112 | 112 | |
113 | 113 | |
114 | 114 | |
... | ... | @@ -143,12 +143,12 @@ |
143 | 143 | { |
144 | 144 | check_for_bios_corruption(); |
145 | 145 | schedule_delayed_work(&bios_check_work, |
146 | - round_jiffies_relative(corruption_check_period*HZ)); | |
146 | + round_jiffies_relative(corruption_check_period*HZ)); | |
147 | 147 | } |
148 | 148 | |
149 | 149 | static int start_periodic_check_for_corruption(void) |
150 | 150 | { |
151 | - if (!memory_corruption_check || corruption_check_period == 0) | |
151 | + if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0) | |
152 | 152 | return 0; |
153 | 153 | |
154 | 154 | printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", |
arch/x86/mm/fault.c
... | ... | @@ -229,15 +229,14 @@ |
229 | 229 | for (address = VMALLOC_START & PMD_MASK; |
230 | 230 | address >= TASK_SIZE && address < FIXADDR_TOP; |
231 | 231 | address += PMD_SIZE) { |
232 | - | |
233 | - unsigned long flags; | |
234 | 232 | struct page *page; |
235 | 233 | |
236 | - spin_lock_irqsave(&pgd_lock, flags); | |
234 | + spin_lock(&pgd_lock); | |
237 | 235 | list_for_each_entry(page, &pgd_list, lru) { |
238 | 236 | spinlock_t *pgt_lock; |
239 | 237 | pmd_t *ret; |
240 | 238 | |
239 | + /* the pgt_lock only for Xen */ | |
241 | 240 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
242 | 241 | |
243 | 242 | spin_lock(pgt_lock); |
... | ... | @@ -247,7 +246,7 @@ |
247 | 246 | if (!ret) |
248 | 247 | break; |
249 | 248 | } |
250 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
249 | + spin_unlock(&pgd_lock); | |
251 | 250 | } |
252 | 251 | } |
253 | 252 | |
... | ... | @@ -828,6 +827,13 @@ |
828 | 827 | unsigned long address, unsigned int fault) |
829 | 828 | { |
830 | 829 | if (fault & VM_FAULT_OOM) { |
830 | + /* Kernel mode? Handle exceptions or die: */ | |
831 | + if (!(error_code & PF_USER)) { | |
832 | + up_read(¤t->mm->mmap_sem); | |
833 | + no_context(regs, error_code, address); | |
834 | + return; | |
835 | + } | |
836 | + | |
831 | 837 | out_of_memory(regs, error_code, address); |
832 | 838 | } else { |
833 | 839 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
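The last fault.c hunk gives mm_fault_error() an early kernel-mode branch: an OOM result from a kernel-mode fault now releases mmap_sem and goes through no_context() (exception fixup or die) rather than the user-oriented out_of_memory() path. Below is a standalone control-flow sketch of that branch; the stubs and the PF_USER/VM_FAULT_OOM values are simplified models of the kernel definitions, not the kernel code itself.

#include <stdio.h>

#define PF_USER      0x4   /* model of the x86 error-code bit: fault came from user mode */
#define VM_FAULT_OOM 0x1   /* model of the VM_FAULT_OOM result bit */

static void no_context(void)    { puts("kernel mode: fix up via exception table or die"); }
static void out_of_memory(void) { puts("user mode: OOM path (may kill a task)"); }

/* Control-flow model of the patched mm_fault_error() OOM branch. */
static void mm_fault_error(unsigned long error_code, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			no_context();
			return;
		}
		out_of_memory();
	}
}

int main(void)
{
	mm_fault_error(0, VM_FAULT_OOM);        /* kernel-mode fault -> no_context() */
	mm_fault_error(PF_USER, VM_FAULT_OOM);  /* user-mode fault -> out_of_memory() */
	return 0;
}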
arch/x86/mm/init_64.c
... | ... | @@ -105,18 +105,18 @@ |
105 | 105 | |
106 | 106 | for (address = start; address <= end; address += PGDIR_SIZE) { |
107 | 107 | const pgd_t *pgd_ref = pgd_offset_k(address); |
108 | - unsigned long flags; | |
109 | 108 | struct page *page; |
110 | 109 | |
111 | 110 | if (pgd_none(*pgd_ref)) |
112 | 111 | continue; |
113 | 112 | |
114 | - spin_lock_irqsave(&pgd_lock, flags); | |
113 | + spin_lock(&pgd_lock); | |
115 | 114 | list_for_each_entry(page, &pgd_list, lru) { |
116 | 115 | pgd_t *pgd; |
117 | 116 | spinlock_t *pgt_lock; |
118 | 117 | |
119 | 118 | pgd = (pgd_t *)page_address(page) + pgd_index(address); |
119 | + /* the pgt_lock only for Xen */ | |
120 | 120 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
121 | 121 | spin_lock(pgt_lock); |
122 | 122 | |
... | ... | @@ -128,7 +128,7 @@ |
128 | 128 | |
129 | 129 | spin_unlock(pgt_lock); |
130 | 130 | } |
131 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
131 | + spin_unlock(&pgd_lock); | |
132 | 132 | } |
133 | 133 | } |
134 | 134 | |
arch/x86/mm/pageattr.c
... | ... | @@ -57,12 +57,10 @@ |
57 | 57 | |
58 | 58 | void update_page_count(int level, unsigned long pages) |
59 | 59 | { |
60 | - unsigned long flags; | |
61 | - | |
62 | 60 | /* Protect against CPA */ |
63 | - spin_lock_irqsave(&pgd_lock, flags); | |
61 | + spin_lock(&pgd_lock); | |
64 | 62 | direct_pages_count[level] += pages; |
65 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
63 | + spin_unlock(&pgd_lock); | |
66 | 64 | } |
67 | 65 | |
68 | 66 | static void split_page_count(int level) |
... | ... | @@ -394,7 +392,7 @@ |
394 | 392 | try_preserve_large_page(pte_t *kpte, unsigned long address, |
395 | 393 | struct cpa_data *cpa) |
396 | 394 | { |
397 | - unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; | |
395 | + unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn; | |
398 | 396 | pte_t new_pte, old_pte, *tmp; |
399 | 397 | pgprot_t old_prot, new_prot, req_prot; |
400 | 398 | int i, do_split = 1; |
... | ... | @@ -403,7 +401,7 @@ |
403 | 401 | if (cpa->force_split) |
404 | 402 | return 1; |
405 | 403 | |
406 | - spin_lock_irqsave(&pgd_lock, flags); | |
404 | + spin_lock(&pgd_lock); | |
407 | 405 | /* |
408 | 406 | * Check for races, another CPU might have split this page |
409 | 407 | * up already: |
410 | 408 | */ |
... | ... | @@ -498,14 +496,14 @@ |
498 | 496 | } |
499 | 497 | |
500 | 498 | out_unlock: |
501 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
499 | + spin_unlock(&pgd_lock); | |
502 | 500 | |
503 | 501 | return do_split; |
504 | 502 | } |
505 | 503 | |
506 | 504 | static int split_large_page(pte_t *kpte, unsigned long address) |
507 | 505 | { |
508 | - unsigned long flags, pfn, pfninc = 1; | |
506 | + unsigned long pfn, pfninc = 1; | |
509 | 507 | unsigned int i, level; |
510 | 508 | pte_t *pbase, *tmp; |
511 | 509 | pgprot_t ref_prot; |
... | ... | @@ -519,7 +517,7 @@ |
519 | 517 | if (!base) |
520 | 518 | return -ENOMEM; |
521 | 519 | |
522 | - spin_lock_irqsave(&pgd_lock, flags); | |
520 | + spin_lock(&pgd_lock); | |
523 | 521 | /* |
524 | 522 | * Check for races, another CPU might have split this page |
525 | 523 | * up for us already: |
... | ... | @@ -591,7 +589,7 @@ |
591 | 589 | */ |
592 | 590 | if (base) |
593 | 591 | __free_page(base); |
594 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
592 | + spin_unlock(&pgd_lock); | |
595 | 593 | |
596 | 594 | return 0; |
597 | 595 | } |
arch/x86/mm/pgtable.c
... | ... | @@ -121,14 +121,12 @@ |
121 | 121 | |
122 | 122 | static void pgd_dtor(pgd_t *pgd) |
123 | 123 | { |
124 | - unsigned long flags; /* can be called from interrupt context */ | |
125 | - | |
126 | 124 | if (SHARED_KERNEL_PMD) |
127 | 125 | return; |
128 | 126 | |
129 | - spin_lock_irqsave(&pgd_lock, flags); | |
127 | + spin_lock(&pgd_lock); | |
130 | 128 | pgd_list_del(pgd); |
131 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
129 | + spin_unlock(&pgd_lock); | |
132 | 130 | } |
133 | 131 | |
134 | 132 | /* |
... | ... | @@ -260,7 +258,6 @@ |
260 | 258 | { |
261 | 259 | pgd_t *pgd; |
262 | 260 | pmd_t *pmds[PREALLOCATED_PMDS]; |
263 | - unsigned long flags; | |
264 | 261 | |
265 | 262 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); |
266 | 263 | |
267 | 264 | |
... | ... | @@ -280,12 +277,12 @@ |
280 | 277 | * respect to anything walking the pgd_list, so that they |
281 | 278 | * never see a partially populated pgd. |
282 | 279 | */ |
283 | - spin_lock_irqsave(&pgd_lock, flags); | |
280 | + spin_lock(&pgd_lock); | |
284 | 281 | |
285 | 282 | pgd_ctor(mm, pgd); |
286 | 283 | pgd_prepopulate_pmd(mm, pgd, pmds); |
287 | 284 | |
288 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
285 | + spin_unlock(&pgd_lock); | |
289 | 286 | |
290 | 287 | return pgd; |
291 | 288 |
arch/x86/pci/ce4100.c
... | ... | @@ -34,6 +34,7 @@ |
34 | 34 | #include <linux/pci.h> |
35 | 35 | #include <linux/init.h> |
36 | 36 | |
37 | +#include <asm/ce4100.h> | |
37 | 38 | #include <asm/pci_x86.h> |
38 | 39 | |
39 | 40 | struct sim_reg { |
40 | 41 | |
41 | 42 | |
... | ... | @@ -306,11 +307,11 @@ |
306 | 307 | .write = ce4100_conf_write, |
307 | 308 | }; |
308 | 309 | |
309 | -static int __init ce4100_pci_init(void) | |
310 | +int __init ce4100_pci_init(void) | |
310 | 311 | { |
311 | 312 | init_sim_regs(); |
312 | 313 | raw_pci_ops = &ce4100_pci_conf; |
313 | - return 0; | |
314 | + /* Indicate caller that it should invoke pci_legacy_init() */ | |
315 | + return 1; | |
314 | 316 | } |
315 | -subsys_initcall(ce4100_pci_init); |
arch/x86/platform/ce4100/ce4100.c
... | ... | @@ -15,6 +15,7 @@ |
15 | 15 | #include <linux/serial_reg.h> |
16 | 16 | #include <linux/serial_8250.h> |
17 | 17 | |
18 | +#include <asm/ce4100.h> | |
18 | 19 | #include <asm/setup.h> |
19 | 20 | #include <asm/io.h> |
20 | 21 | |
... | ... | @@ -129,5 +130,6 @@ |
129 | 130 | x86_init.resources.probe_roms = x86_init_noop; |
130 | 131 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; |
131 | 132 | x86_init.mpparse.find_smp_config = sdv_find_smp_config; |
133 | + x86_init.pci.init = ce4100_pci_init; | |
132 | 134 | } |
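Taken together, the two ce4100 diffs above stop relying on a subsys_initcall(): ce4100_pci_init() is now installed as the x86_init.pci.init callback, and its return value of 1 tells the caller to invoke pci_legacy_init() as well. The following self-contained sketch models that dispatch pattern; the struct and function names are illustrative stand-ins, not the kernel's actual x86_init definitions.

#include <stdio.h>

/* Illustrative hook table modelled on how this commit uses
 * x86_init.pci.init; the real kernel structures are not reproduced. */
struct pci_init_hooks {
	int (*init)(void);   /* platform hook; nonzero asks for the legacy path */
};

static int platform_pci_init(void)
{
	puts("platform pci ops installed");
	return 1;            /* indicate to the caller that it should run the legacy init */
}

static void legacy_pci_init(void)
{
	puts("legacy pci init");
}

static struct pci_init_hooks x86_pci = { .init = platform_pci_init };

int main(void)
{
	/* Dispatch implied by the comment in ce4100_pci_init(): run the
	 * platform hook first, fall back to the legacy init on nonzero. */
	if (x86_pci.init())
		legacy_pci_init();
	return 0;
}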
arch/x86/xen/mmu.c
... | ... | @@ -986,10 +986,9 @@ |
986 | 986 | */ |
987 | 987 | void xen_mm_pin_all(void) |
988 | 988 | { |
989 | - unsigned long flags; | |
990 | 989 | struct page *page; |
991 | 990 | |
992 | - spin_lock_irqsave(&pgd_lock, flags); | |
991 | + spin_lock(&pgd_lock); | |
993 | 992 | |
994 | 993 | list_for_each_entry(page, &pgd_list, lru) { |
995 | 994 | if (!PagePinned(page)) { |
... | ... | @@ -998,7 +997,7 @@ |
998 | 997 | } |
999 | 998 | } |
1000 | 999 | |
1001 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
1000 | + spin_unlock(&pgd_lock); | |
1002 | 1001 | } |
1003 | 1002 | |
1004 | 1003 | /* |
1005 | 1004 | |
... | ... | @@ -1099,10 +1098,9 @@ |
1099 | 1098 | */ |
1100 | 1099 | void xen_mm_unpin_all(void) |
1101 | 1100 | { |
1102 | - unsigned long flags; | |
1103 | 1101 | struct page *page; |
1104 | 1102 | |
1105 | - spin_lock_irqsave(&pgd_lock, flags); | |
1103 | + spin_lock(&pgd_lock); | |
1106 | 1104 | |
1107 | 1105 | list_for_each_entry(page, &pgd_list, lru) { |
1108 | 1106 | if (PageSavePinned(page)) { |
... | ... | @@ -1112,7 +1110,7 @@ |
1112 | 1110 | } |
1113 | 1111 | } |
1114 | 1112 | |
1115 | - spin_unlock_irqrestore(&pgd_lock, flags); | |
1113 | + spin_unlock(&pgd_lock); | |
1116 | 1114 | } |
1117 | 1115 | |
1118 | 1116 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |